parquet-converter committed on
Commit 2ae63ac · 1 Parent(s): 781abbe

Update parquet files (step 34 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/Provider/Providers/DFEHub.py +0 -44
  2. spaces/123Kumar/vits-uma-genshin-honkai123/modules.py +0 -388
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Film 4 and Enjoy Movies Offline Without Ads or Interruptions.md +0 -31
  4. spaces/1gistliPinn/ChatGPT4/Examples/Anya Dasha Crazy Holiday.md +0 -6
  5. spaces/1gistliPinn/ChatGPT4/Examples/Banjo Marathi Movie Download Dvdrip Movies REPACK.md +0 -6
  6. spaces/1gistliPinn/ChatGPT4/Examples/Bloody Ultra Core 3 Keygen High Quality.md +0 -6
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Angry Birds Space 2 APK - Download Now and Start Your Space Adventure with the Angry Birds.md +0 -175
  8. spaces/1phancelerku/anime-remove-background/Download CarX Drift Racing 2 Mod Apk Obb Data for Android.md +0 -139
  9. spaces/1phancelerku/anime-remove-background/Euro Truck Simulator 3 Europa The Ultimate Truck Driving Game for Android.md +0 -114
  10. spaces/1toTree/lora_test/ppdiffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py +0 -548
  11. spaces/44ov41za8i/FreeVC/models.py +0 -351
  12. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/partial_fc.py +0 -222
  13. spaces/AIatUIUC/CodeLATS/lats/lats.py +0 -233
  14. spaces/AONYLMR/White-box-Cartoonization/app.py +0 -108
  15. spaces/Adapter/CoAdapter/ldm/util.py +0 -200
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/texttranslation.js +0 -2
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/ColorInputBase.js +0 -145
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/tabpages/Factory.js +0 -13
  19. spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/replicate.py +0 -94
  20. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/torch2onnx.py +0 -59
  21. spaces/Alpaca233/SadTalker/src/utils/text2speech.py +0 -20
  22. spaces/Ameaou/academic-chatgpt3.1/theme.py +0 -231
  23. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/__init__.py +0 -0
  24. spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.py +0 -315
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_heun_discrete.py +0 -426
  26. spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py +0 -9
  27. spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py +0 -52
  28. spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/retina_sepbn_head.py +0 -113
  29. spaces/ArtificialArtist007/Rate-my-Aiart/README.md +0 -13
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/compat.py +0 -63
  31. spaces/Awesimo/jojogan/e4e/datasets/images_dataset.py +0 -33
  32. spaces/Benson/text-generation/Examples/9no Amanecer Rpg Mod Apk.md +0 -84
  33. spaces/Benson/text-generation/Examples/Android Stalker.md +0 -58
  34. spaces/Benson/text-generation/Examples/Apk De La Saga Del Verano.md +0 -59
  35. spaces/Benson/text-generation/Examples/Cuerda Hroe Vice Ciudad 6.5 Descarga.md +0 -71
  36. spaces/Benson/text-generation/Examples/Descargar El Certificado Del Consejo De Abogados De La India.md +0 -111
  37. spaces/Benson/text-generation/Examples/Descargar Genshin Impacto Paso En Un Vasto.md +0 -232
  38. spaces/BetterAPI/BetterChat_new/README.md +0 -13
  39. spaces/CVPR/BigDL-Nano_inference/data.py +0 -233
  40. spaces/CVPR/LIVE/thrust/cmake/ThrustBuildCompilerTargets.cmake +0 -150
  41. spaces/CVPR/lama-example/models/ade20k/segm_lib/nn/__init__.py +0 -2
  42. spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/logger.py +0 -93
  43. spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/__init__.py +0 -0
  44. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/dpt_depth.py +0 -109
  45. spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/modeling_llama.py +0 -755
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/utils/__init__.py +0 -30
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/dependencies/__init__.py +0 -0
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/otlLib/optimize/gpos.py +0 -452
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttx.py +0 -469
  50. spaces/DaleChen/AutoGPT/Dockerfile +0 -38
spaces/101-5/gpt4free/g4f/Provider/Providers/DFEHub.py DELETED
@@ -1,44 +0,0 @@
- import os, requests
- from ...typing import sha256, Dict, get_type_hints
- import json
-
- url = "https://chat.dfehub.com/api/chat"
- model = ['gpt-3.5-turbo']
- supports_stream = False
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     base = ''
-     for message in messages:
-         base += '%s: %s\n' % (message['role'], message['content'])
-     base += 'assistant:'
-
-     headers = {
-         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
-     }
-     data = {
-         "model": {
-             "id": "gpt-3.5-turbo",
-             "name": "GPT-3.5",
-             "maxLength": 12000,
-             "tokenLimit": 4000
-         },
-         "messages": [
-             {
-                 "role": "user",
-                 "content": base
-             }
-         ],
-         "key": "",
-         "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
-         "temperature": 1
-     }
-     response = requests.post(url, headers=headers, data=json.dumps(data))
-     if response.status_code == 200:
-         yield response.text
-     else:
-         print(f"Error Occurred::{response.status_code}")
-         return None
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

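For context on the provider interface removed above: _create_completion is a generator that flattens the message list into one prompt and yields the raw response body on HTTP 200. A minimal, hypothetical usage sketch, assuming the g4f package layout that existed while DFEHub.py was still present (the import path no longer resolves after this commit):

# Hypothetical sketch only; DFEHub.py is deleted by this commit.
from g4f.Provider.Providers import DFEHub

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello."},
]

# The provider yields the response text once (supports_stream is False,
# so there are no incremental chunks despite the generator interface).
for chunk in DFEHub._create_completion(model="gpt-3.5-turbo", messages=messages, stream=False):
    print(chunk)
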
spaces/123Kumar/vits-uma-genshin-honkai123/modules.py DELETED
@@ -1,388 +0,0 @@
- import math
- import numpy as np
- import torch
- from torch import nn
- from torch.nn import functional as F
-
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
- from torch.nn.utils import weight_norm, remove_weight_norm
-
- import commons
- from commons import init_weights, get_padding
- from transforms import piecewise_rational_quadratic_transform
-
-
- LRELU_SLOPE = 0.1
-
-
- class LayerNorm(nn.Module):
-   def __init__(self, channels, eps=1e-5):
-     super().__init__()
-     self.channels = channels
-     self.eps = eps
-
-     self.gamma = nn.Parameter(torch.ones(channels))
-     self.beta = nn.Parameter(torch.zeros(channels))
-
-   def forward(self, x):
-     x = x.transpose(1, -1)
-     x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-     return x.transpose(1, -1)
-
-
- class ConvReluNorm(nn.Module):
-   def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-     super().__init__()
-     self.in_channels = in_channels
-     self.hidden_channels = hidden_channels
-     self.out_channels = out_channels
-     self.kernel_size = kernel_size
-     self.n_layers = n_layers
-     self.p_dropout = p_dropout
-     assert n_layers > 1, "Number of layers should be larger than 0."
-
-     self.conv_layers = nn.ModuleList()
-     self.norm_layers = nn.ModuleList()
-     self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-     self.norm_layers.append(LayerNorm(hidden_channels))
-     self.relu_drop = nn.Sequential(
-         nn.ReLU(),
-         nn.Dropout(p_dropout))
-     for _ in range(n_layers-1):
-       self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-       self.norm_layers.append(LayerNorm(hidden_channels))
-     self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-     self.proj.weight.data.zero_()
-     self.proj.bias.data.zero_()
-
-   def forward(self, x, x_mask):
-     x_org = x
-     for i in range(self.n_layers):
-       x = self.conv_layers[i](x * x_mask)
-       x = self.norm_layers[i](x)
-       x = self.relu_drop(x)
-     x = x_org + self.proj(x)
-     return x * x_mask
-
-
- class DDSConv(nn.Module):
-   """
-   Dialted and Depth-Separable Convolution
-   """
-   def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-     super().__init__()
-     self.channels = channels
-     self.kernel_size = kernel_size
-     self.n_layers = n_layers
-     self.p_dropout = p_dropout
-
-     self.drop = nn.Dropout(p_dropout)
-     self.convs_sep = nn.ModuleList()
-     self.convs_1x1 = nn.ModuleList()
-     self.norms_1 = nn.ModuleList()
-     self.norms_2 = nn.ModuleList()
-     for i in range(n_layers):
-       dilation = kernel_size ** i
-       padding = (kernel_size * dilation - dilation) // 2
-       self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
-           groups=channels, dilation=dilation, padding=padding
-       ))
-       self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-       self.norms_1.append(LayerNorm(channels))
-       self.norms_2.append(LayerNorm(channels))
-
-   def forward(self, x, x_mask, g=None):
-     if g is not None:
-       x = x + g
-     for i in range(self.n_layers):
-       y = self.convs_sep[i](x * x_mask)
-       y = self.norms_1[i](y)
-       y = F.gelu(y)
-       y = self.convs_1x1[i](y)
-       y = self.norms_2[i](y)
-       y = F.gelu(y)
-       y = self.drop(y)
-       x = x + y
-     return x * x_mask
-
-
- class WN(torch.nn.Module):
-   def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
-     super(WN, self).__init__()
-     assert(kernel_size % 2 == 1)
-     self.hidden_channels =hidden_channels
-     self.kernel_size = kernel_size,
-     self.dilation_rate = dilation_rate
-     self.n_layers = n_layers
-     self.gin_channels = gin_channels
-     self.p_dropout = p_dropout
-
-     self.in_layers = torch.nn.ModuleList()
-     self.res_skip_layers = torch.nn.ModuleList()
-     self.drop = nn.Dropout(p_dropout)
-
-     if gin_channels != 0:
-       cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
-       self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
-     for i in range(n_layers):
-       dilation = dilation_rate ** i
-       padding = int((kernel_size * dilation - dilation) / 2)
-       in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
-                                  dilation=dilation, padding=padding)
-       in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
-       self.in_layers.append(in_layer)
-
-       # last one is not necessary
-       if i < n_layers - 1:
-         res_skip_channels = 2 * hidden_channels
-       else:
-         res_skip_channels = hidden_channels
-
-       res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
-       res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
-       self.res_skip_layers.append(res_skip_layer)
-
-   def forward(self, x, x_mask, g=None, **kwargs):
-     output = torch.zeros_like(x)
-     n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
-     if g is not None:
-       g = self.cond_layer(g)
-
-     for i in range(self.n_layers):
-       x_in = self.in_layers[i](x)
-       if g is not None:
-         cond_offset = i * 2 * self.hidden_channels
-         g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
-       else:
-         g_l = torch.zeros_like(x_in)
-
-       acts = commons.fused_add_tanh_sigmoid_multiply(
-           x_in,
-           g_l,
-           n_channels_tensor)
-       acts = self.drop(acts)
-
-       res_skip_acts = self.res_skip_layers[i](acts)
-       if i < self.n_layers - 1:
-         res_acts = res_skip_acts[:,:self.hidden_channels,:]
-         x = (x + res_acts) * x_mask
-         output = output + res_skip_acts[:,self.hidden_channels:,:]
-       else:
-         output = output + res_skip_acts
-     return output * x_mask
-
-   def remove_weight_norm(self):
-     if self.gin_channels != 0:
-       torch.nn.utils.remove_weight_norm(self.cond_layer)
-     for l in self.in_layers:
-       torch.nn.utils.remove_weight_norm(l)
-     for l in self.res_skip_layers:
-       torch.nn.utils.remove_weight_norm(l)
-
-
- class ResBlock1(torch.nn.Module):
-   def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-     super(ResBlock1, self).__init__()
-     self.convs1 = nn.ModuleList([
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                            padding=get_padding(kernel_size, dilation[0]))),
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                            padding=get_padding(kernel_size, dilation[1]))),
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
-                            padding=get_padding(kernel_size, dilation[2])))
-     ])
-     self.convs1.apply(init_weights)
-
-     self.convs2 = nn.ModuleList([
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                            padding=get_padding(kernel_size, 1))),
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                            padding=get_padding(kernel_size, 1))),
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                            padding=get_padding(kernel_size, 1)))
-     ])
-     self.convs2.apply(init_weights)
-
-   def forward(self, x, x_mask=None):
-     for c1, c2 in zip(self.convs1, self.convs2):
-       xt = F.leaky_relu(x, LRELU_SLOPE)
-       if x_mask is not None:
-         xt = xt * x_mask
-       xt = c1(xt)
-       xt = F.leaky_relu(xt, LRELU_SLOPE)
-       if x_mask is not None:
-         xt = xt * x_mask
-       xt = c2(xt)
-       x = xt + x
-     if x_mask is not None:
-       x = x * x_mask
-     return x
-
-   def remove_weight_norm(self):
-     for l in self.convs1:
-       remove_weight_norm(l)
-     for l in self.convs2:
-       remove_weight_norm(l)
-
-
- class ResBlock2(torch.nn.Module):
-   def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-     super(ResBlock2, self).__init__()
-     self.convs = nn.ModuleList([
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                            padding=get_padding(kernel_size, dilation[0]))),
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                            padding=get_padding(kernel_size, dilation[1])))
-     ])
-     self.convs.apply(init_weights)
-
-   def forward(self, x, x_mask=None):
-     for c in self.convs:
-       xt = F.leaky_relu(x, LRELU_SLOPE)
-       if x_mask is not None:
-         xt = xt * x_mask
-       xt = c(xt)
-       x = xt + x
-     if x_mask is not None:
-       x = x * x_mask
-     return x
-
-   def remove_weight_norm(self):
-     for l in self.convs:
-       remove_weight_norm(l)
-
-
- class Log(nn.Module):
-   def forward(self, x, x_mask, reverse=False, **kwargs):
-     if not reverse:
-       y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
-       logdet = torch.sum(-y, [1, 2])
-       return y, logdet
-     else:
-       x = torch.exp(x) * x_mask
-       return x
-
-
- class Flip(nn.Module):
-   def forward(self, x, *args, reverse=False, **kwargs):
-     x = torch.flip(x, [1])
-     if not reverse:
-       logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
-       return x, logdet
-     else:
-       return x
-
-
- class ElementwiseAffine(nn.Module):
-   def __init__(self, channels):
-     super().__init__()
-     self.channels = channels
-     self.m = nn.Parameter(torch.zeros(channels,1))
-     self.logs = nn.Parameter(torch.zeros(channels,1))
-
-   def forward(self, x, x_mask, reverse=False, **kwargs):
-     if not reverse:
-       y = self.m + torch.exp(self.logs) * x
-       y = y * x_mask
-       logdet = torch.sum(self.logs * x_mask, [1,2])
-       return y, logdet
-     else:
-       x = (x - self.m) * torch.exp(-self.logs) * x_mask
-       return x
-
-
- class ResidualCouplingLayer(nn.Module):
-   def __init__(self,
-       channels,
-       hidden_channels,
-       kernel_size,
-       dilation_rate,
-       n_layers,
-       p_dropout=0,
-       gin_channels=0,
-       mean_only=False):
-     assert channels % 2 == 0, "channels should be divisible by 2"
-     super().__init__()
-     self.channels = channels
-     self.hidden_channels = hidden_channels
-     self.kernel_size = kernel_size
-     self.dilation_rate = dilation_rate
-     self.n_layers = n_layers
-     self.half_channels = channels // 2
-     self.mean_only = mean_only
-
-     self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-     self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
-     self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-     self.post.weight.data.zero_()
-     self.post.bias.data.zero_()
-
-   def forward(self, x, x_mask, g=None, reverse=False):
-     x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-     h = self.pre(x0) * x_mask
-     h = self.enc(h, x_mask, g=g)
-     stats = self.post(h) * x_mask
-     if not self.mean_only:
-       m, logs = torch.split(stats, [self.half_channels]*2, 1)
-     else:
-       m = stats
-       logs = torch.zeros_like(m)
-
-     if not reverse:
-       x1 = m + x1 * torch.exp(logs) * x_mask
-       x = torch.cat([x0, x1], 1)
-       logdet = torch.sum(logs, [1,2])
-       return x, logdet
-     else:
-       x1 = (x1 - m) * torch.exp(-logs) * x_mask
-       x = torch.cat([x0, x1], 1)
-       return x
-
-
- class ConvFlow(nn.Module):
-   def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
-     super().__init__()
-     self.in_channels = in_channels
-     self.filter_channels = filter_channels
-     self.kernel_size = kernel_size
-     self.n_layers = n_layers
-     self.num_bins = num_bins
-     self.tail_bound = tail_bound
-     self.half_channels = in_channels // 2
-
-     self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
-     self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
-     self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
-     self.proj.weight.data.zero_()
-     self.proj.bias.data.zero_()
-
-   def forward(self, x, x_mask, g=None, reverse=False):
-     x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-     h = self.pre(x0)
-     h = self.convs(h, x_mask, g=g)
-     h = self.proj(h) * x_mask
-
-     b, c, t = x0.shape
-     h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
-     unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
-     unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
-     unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
-     x1, logabsdet = piecewise_rational_quadratic_transform(x1,
-         unnormalized_widths,
-         unnormalized_heights,
-         unnormalized_derivatives,
-         inverse=reverse,
-         tails='linear',
-         tail_bound=self.tail_bound
-     )
-
-     x = torch.cat([x0, x1], 1) * x_mask
-     logdet = torch.sum(logabsdet * x_mask, [1,2])
-     if not reverse:
-       return x, logdet
-     else:
-       return x

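As background on the flow modules removed above: each invertible layer (Log, Flip, ElementwiseAffine, ResidualCouplingLayer, ConvFlow) returns (y, logdet) in the forward pass and reconstructs its input exactly when called with reverse=True. A self-contained sketch of that contract, using the same affine math as the deleted ElementwiseAffine (plain torch only, since commons and transforms are repo-local modules):

import torch

# Forward: y = (m + exp(logs) * x) * mask, logdet = sum(logs * mask).
# Reverse: x = (y - m) * exp(-logs) * mask. Mirrors ElementwiseAffine above.
channels, t = 4, 8
m = torch.zeros(channels, 1)
logs = torch.randn(channels, 1) * 0.1
x = torch.randn(1, channels, t)
x_mask = torch.ones(1, 1, t)

y = (m + torch.exp(logs) * x) * x_mask
logdet = torch.sum(logs * x_mask, [1, 2])

x_rec = (y - m) * torch.exp(-logs) * x_mask
assert torch.allclose(x, x_rec, atol=1e-6)  # the transform inverts exactly
print("logdet:", logdet.item())
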
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Film 4 and Enjoy Movies Offline Without Ads or Interruptions.md DELETED
@@ -1,31 +0,0 @@
- <br />
- <code>
- <h1>How to Download Film 4 and Watch Movies Online</h1>
- <p>If you are a movie lover, you may have heard of Film 4, a British free-to-air television channel that broadcasts a wide range of films, from classics to cults, from indie to mainstream. Film 4 is also available online, where you can stream or download movies on demand. In this article, we will show you how to download Film 4 and watch movies online.</p>
- <h2>What is Film 4?</h2>
- <p>Film 4 is a part of Channel 4, a public-service broadcaster in the UK. Film 4 was launched in 1982 as a subscription-based service, but became free-to-air in 2006. Film 4 is known for its diverse and quality programming, featuring films from various genres, countries, and eras. Film 4 also produces and co-produces original films, such as Slumdog Millionaire, The Favourite, and Three Billboards Outside Ebbing, Missouri.</p>
- <h2>download film 4</h2><br /><p><b><b>Download</b> >>> <a href="https://byltly.com/2uKwOc">https://byltly.com/2uKwOc</a></b></p><br /><br />
- <p>Film 4 has an online platform called All 4, where you can watch live TV or catch up on shows and movies that you missed. All 4 also has a section called Film 4 On Demand, where you can stream or download movies from the Film 4 library. You can access All 4 on various devices, such as computers, smartphones, tablets, smart TVs, game consoles, etc.</p>
- <h2>How to Download Film 4?</h2>
- <p>To download Film 4 and watch movies online, you need to follow these steps:</p>
- <ol>
- <li>Go to the All 4 website at https://www.channel4.com/ or download the All 4 app on your device.</li>
- <li>Sign up for a free account or log in if you already have one.</li>
- <li>Browse or search for the movie that you want to watch.</li>
- <li>Click on the movie and select the option to download it.</li>
- <li>Choose the quality and file size that suits your device and internet connection.</li>
- <li>Wait for the download to finish and enjoy your movie offline.</li>
- </ol>
- <p>Note that not all movies are available for download. You can check the availability by looking for the download icon next to the movie title. Also note that downloaded movies have an expiry date, which means you have to watch them within a certain period of time before they are deleted from your device.</p>
- <h2>What are the Benefits of Downloading Film 4?</h2>
- <p>Downloading Film 4 has many benefits, such as:</p>
- <ul>
- <li>You can watch movies offline without worrying about internet connection or data usage.</li>
- <li>You can watch movies anytime and anywhere without being tied to a TV schedule.</li>
- <li>You can choose the quality and file size that suits your device and storage space.</li>
- <li>You can avoid ads and interruptions that may occur when streaming online.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to download Film 4 and watch movies online. Film 4 is a great source of entertainment for movie lovers, offering a wide range of films from various genres, countries, and eras. By downloading Film 4, you can enjoy your movies offline without any hassle. We hope you found this article helpful and informative. If you have any questions</p> ddb901b051<br />
- <br />
- <br />

spaces/1gistliPinn/ChatGPT4/Examples/Anya Dasha Crazy Holiday.md DELETED
@@ -1,6 +0,0 @@
- <h2>Anya Dasha Crazy Holiday</h2><br /><p><b><b>DOWNLOAD</b> &#9675;&#9675;&#9675; <a href="https://imgfil.com/2uy1Si">https://imgfil.com/2uy1Si</a></b></p><br /><br />
-
- Find crazy holiday stock images in HD and millions of other royalty-free stock photos, illustrations and vectors in the Shutterstock collection. Thousands of new ... 1fdad05405<br />
- <br />
- <br />
- <p></p>

spaces/1gistliPinn/ChatGPT4/Examples/Banjo Marathi Movie Download Dvdrip Movies REPACK.md DELETED
@@ -1,6 +0,0 @@
- <h2>Banjo Marathi Movie Download Dvdrip Movies</h2><br /><p><b><b>Download File</b> &#9889; <a href="https://imgfil.com/2uy1N8">https://imgfil.com/2uy1N8</a></b></p><br /><br />
- <br />
- Banjo is a 2016 Indian Hindi-language Action Drama film, directed by Ravi Jadhav and ... "Banjo Movie Review: Riteish Deshmukh's Film is a Pale Shadow of Rock On - NDTV Movies". NDTVMovies.com ... Download as PDF · Printable version ... 1fdad05405<br />
- <br />
- <br />
- <p></p>

spaces/1gistliPinn/ChatGPT4/Examples/Bloody Ultra Core 3 Keygen High Quality.md DELETED
@@ -1,6 +0,0 @@
- <h2>Bloody Ultra Core 3 Keygen</h2><br /><p><b><b>Download File</b> &rArr;&rArr;&rArr; <a href="https://imgfil.com/2uxYx6">https://imgfil.com/2uxYx6</a></b></p><br /><br />
- <br />
- CMJ RADIO 200 AIRPLAY 50 200 AIRPLAY RADIO 200 ADDS C = Core Station A. WHTESTRFES CATPOWER CURSIVE dYNGOiCXM BLACK KEYS CORAL ... SAHARA HOTNGHTS SOLEDAD BROTHERS T-MNUS BLAaEYES ULTRA ... 6S BLUE NOTE 33 1/3 WAFS IVTYfVORNNG JACKET IGUANAS JAYHAWKS ... 1fdad05405<br />
- <br />
- <br />
- <p></p>

spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Angry Birds Space 2 APK - Download Now and Start Your Space Adventure with the Angry Birds.md DELETED
@@ -1,175 +0,0 @@
- <br />
- <h1>Angry Birds Space 2 APK: A Review of the Latest Version of the Popular Mobile Game</h1>
- <p>Angry Birds Space 2 APK is a physics-based puzzle game and the third game in the Angry Birds series. It was developed and released by Rovio Entertainment Ltd. in March 2021. It follows the tale of an orbital bead, which was stolen by greedy investors. In order to earn back the lost beads, your angry bird has to fly through different space portals and finish all levels in each stage.</p>
- <h2>angry birds space 2 apk</h2><br /><p><b><b>DOWNLOAD</b> &#10002; &#10002; &#10002; <a href="https://urlin.us/2uT1sc">https://urlin.us/2uT1sc</a></b></p><br /><br />
- <p>Angry Birds Space 2 APK is an exciting and fun game that offers intergalactic fun at every turn. It has over 300 levels across 10 planets, including our own Solar System. It also has new playable characters, special abilities, zero-gravity space adventures, trick shots, hidden bonus levels, daily missions, and more. If you are a fan of Angry Birds or puzzle games in general, you should definitely download and play this game.</p>
- <p>In this article, we will review the features, gameplay, tips, comparison, rating, and pros and cons of Angry Birds Space 2 APK. We will also show you how to download and install the game on your Android device. By the end of this article, you will have a clear idea of whether this game is worth your time and money or not.</p>
- <h2>Features of Angry Birds Space 2 APK</h2>
- <p>Angry Birds Space 2 APK has many features that make it stand out from other puzzle games. Here are some of them:</p>
- <p>angry birds space 2 apk download<br />
- angry birds space 2 apk mod<br />
- angry birds space 2 apk free<br />
- angry birds space 2 apk full version<br />
- angry birds space 2 apk android<br />
- angry birds space 2 apk latest version<br />
- angry birds space 2 apk offline<br />
- angry birds space 2 apk unlimited money<br />
- angry birds space 2 apk obb<br />
- angry birds space 2 apk hack<br />
- angry birds space 2 apk for pc<br />
- angry birds space 2 apk revdl<br />
- angry birds space 2 apk uptodown<br />
- angry birds space 2 apk pure<br />
- angry birds space 2 apk mirror<br />
- angry birds space 2 apk rexdl<br />
- angry birds space 2 apk data<br />
- angry birds space 2 apk old version<br />
- angry birds space 2 apk no ads<br />
- angry birds space 2 apk cracked<br />
- angry birds space 2 apk game<br />
- angry birds space 2 apk file<br />
- angry birds space 2 apk mob.org<br />
- angry birds space 2 apk apkpure<br />
- angry birds space 2 apk appvn<br />
- angry birds space 2 apk mod menu<br />
- angry birds space 2 apk all levels unlocked<br />
- angry birds space 2 apk android oyun club<br />
- angry birds space 2 apk andropalace<br />
- angry birds space 2 apk aptoide<br />
- angry birds space 2 apk android republic<br />
- angry birds space 2 apk blackmod<br />
- angry birds space 2 apk bluestacks<br />
- angry birds space 2 apk by rovio entertainment corporation<br />
- angry birds space 2 apk cheat codes<br />
- angry birds space 2 apk coins hack<br />
- angry birds space 2 apk direct download link<br />
- angry birds space 2 apk download for android phoneky.com <br />
- angry birds space 2 apk download highly compressed <br />
- angry birds space 2 apk download mobomarket</p>
- <ul>
- <li><b>Over 300 interstellar levels across 10 planets</b>: You can play over 300 levels across different planets, such as Cold Cuts, Red Planet, Utopia, Solar System, etc. Each planet has its own theme, challenges, enemies, and surprises. You can also unlock new episodes as you progress through the game.</li>
- <li><b>New playable characters and unique special abilities for each bird</b>: You can use different birds to fling at the pigs, such as Red Bird, Bomb Bird, Ice Bird, Lazer Bird, etc. Each bird has its own special ability that can help you in different situations. For example, Lazer Bird can change direction in mid-air, Ice Bird can freeze objects, etc. You can also unlock new birds as you play.</li>
- <li><b>Zero-gravity space adventures and trick shots using planets' gravity</b>: One of the most interesting features of Angry Birds Space 2 APK is the zero-gravity space environment. You can use the gravity of the planets to make trick shots and hit the pigs in creative ways. You can also use the space debris, asteroids, and other objects to your advantage. The game physics are realistic and fun to experiment with.</li>
- <li><b>Hidden bonus levels and beautifully detailed backgrounds</b>: You can find hidden bonus levels in each planet by looking for golden eggs, stars, or other clues. These bonus levels offer extra challenges and rewards. You can also enjoy the stunning graphics and backgrounds of the game, which are colorful, detailed, and immersive.</li>
- <li><b>Daily missions and achievements</b>: You can complete daily missions to earn coins, power-ups, and other rewards. You can also unlock achievements by completing certain tasks or reaching certain milestones. These features add more replay value and motivation to the game.</li>
- </ul>
- <h2>How to Download and Install Angry Birds Space 2 APK</h2>
- <p>Angry Birds Space 2 APK is not available on the Google Play Store, so you will need to download it from a third-party source. Here are the steps on how to do that:</p>
- <ol>
- <li><b>Enable unknown sources on your device</b>: Go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.</li>
- <li><b>Download Angry Birds Space 2 APK file</b>: You can download the APK file from various websites, such as APKPure, APKMirror, etc. Make sure you download it from a trusted and reliable source. You can also scan the file with an antivirus app before installing it.</li>
- <li><b>Install Angry Birds Space 2 APK file</b>: Locate the downloaded file on your device and tap on it. Follow the instructions on the screen to install the app. It may take a few minutes depending on your device and internet speed.</li>
- <li><b>Launch Angry Birds Space 2 APK</b>: Once the installation is done, you can launch the app from your app drawer or home screen. Enjoy playing Angry Birds Space 2 APK!</li>
- </ol>
- <p>Tips on how to avoid malware and viruses when downloading APK files:</p>
- <ul>
- <li><b>Do some research before downloading</b>: Check the reviews, ratings, comments, and feedback of the app and the website you are downloading from. Look for any red flags or signs of malware or viruses.</li>
- <li><b>Use a VPN service</b>: A VPN service can protect your online privacy and security by encrypting your data and hiding your IP address. It can also help you bypass geo-restrictions and access blocked websites.</li>
- <li><b>Update your device and apps regularly</b>: Updating your device and apps can fix any bugs or vulnerabilities that may expose you to malware or viruses. It can also improve your device performance and stability.</li>
- </ul>
- <h2>Gameplay and Tips of Angry Birds Space 2 APK</h2>
- <p>Angry Birds Space 2 APK is a simple yet addictive game that anyone can play. Here are some basics on how to play the game and use the different birds and their abilities:</p>
- <ul>
- <li><b>How to play the game</b>: The game consists of different levels across different planets. In each level, you have to fling angry birds at the pigs using a slingshot. Your goal is to destroy all the pigs and their structures using as few birds as possible. You can also collect stars, coins, power-ups, and other items along the way.</li>
- <li><b>How to use the different birds and their abilities</b>: Each bird has its own color, shape, size, weight, and special ability. You can tap on the screen while flinging a bird to activate its ability. Here are some examples of the birds and their abilities:</li>
- <ul>
- <li><b>Red Bird</b>: The most basic bird that has no special ability. It is good for breaking wood and glass.</li>
- <li><b>Bomb Bird</b>: A black bird that explodes when tapped or after hitting something. It is good for breaking stone and metal.</li>
- <li><b>Ice Bird</b>: A blue bird that freezes objects when tapped or after hitting something. It is good for making objects brittle and easier to break.</li>
- <li><b>Lazer Bird</b>: A purple bird that changes direction in mid-air when tapped. It is good for hitting hard-to-reach targets or making curved shots.</li>
- <li><b>And more...</b>: There are many more birds that you can unlock and use in the game, such as Terence, Stella, Bubbles, Hal, etc. Each one has its own unique ability that can help you in different situations.</li>
- </ul>
- </ul>
- <p>Tips and tricks on how to complete the levels and get three stars:</p>
- <ul>
- <li><b>Use the right bird for the right job</b>: Try to match the bird's ability with the type of material or structure you are aiming at. For example, use Bomb Bird for stone and metal, Ice Bird for wood and glass, etc.</li>
- <li><b>Use the gravity of the planets</b>: You can use the gravity of the planets to make curved shots or hit multiple targets with one bird. You can also use the space debris, asteroids, and other objects to bounce or ricochet your birds.</li>
- <li><b>Use power-ups wisely</b>: You can use power-ups to boost your birds' abilities or get extra birds. For example, you can use the King Sling to fling your birds faster and farther, the Sling Scope to aim more accurately, the Birdquake to shake the ground and make structures collapse, etc. However, power-ups are limited and cost coins, so use them sparingly and only when necessary.</li>
- <li><b>Replay levels to improve your score</b>: You can replay any level you have completed to try to get a better score or more stars. You can also try different strategies or birds to see what works best for you.</li>
- <li><b>Watch videos or read guides for help</b>: If you are stuck on a level or want to learn more tips and tricks, you can watch videos or read guides online. There are many websites and YouTube channels that offer walkthroughs, tutorials, and tips for Angry Birds Space 2 APK.</li>
- </ul>
- <h2>Comparison with Angry Birds Space</h2>
- <p>Angry Birds Space 2 APK is a sequel to Angry Birds Space, which was released in 2012. Angry Birds Space was the first game in the series that introduced the space theme and the zero-gravity physics. It was also a huge success and received positive reviews from critics and users alike.</p>
- <p>Angry Birds Space 2 APK is similar to Angry Birds Space in many ways, but it also has some differences. Here are some of them:</p>
- <table>
- <tr>
- <th>Angry Birds Space</th>
- <th>Angry Birds Space 2 APK</th>
- </tr>
- <tr>
- <td>- Has over 200 levels across 9 planets</td>
- <td>- Has over 300 levels across 10 planets</td>
- </tr>
- <tr>
- <td>- Has 8 playable characters with different abilities</td>
- <td>- Has 12 playable characters with different abilities</td>
- </tr>
- <tr>
- <td>- Has boss battles with King Pig, Fat Pig, etc.</td>
- <td>- Has boss battles with greedy investors, etc.</td>
- </tr>
- <tr>
- <td>- Has golden eggs and eggsteroids as hidden bonus levels</td>
- <td>- Has golden eggs and stars as hidden bonus levels</td>
- </tr>
- <tr>
- <td>- Has power-ups such as Super Seeds, Space Eagles, etc.</td>
- <td>- Has power-ups such as King Sling, Sling Scope, Birdquake, etc.</td>
- </tr>
- <tr>
- <td>- Has a simple and cartoonish graphics style</td>
- <td>- Has a more detailed and realistic graphics style</td>
- </tr>
- <tr>
- <td>- Has a space-themed soundtrack and sound effects</td>
- <td>- Has a more varied and dynamic soundtrack and sound effects</td>
- </tr>
- </table>
- <p>Pros and cons of each version:</p>
- <table>
- <tr>
- <th>Angry Birds Space</th>
- <th>Angry Birds Space 2 APK</th>
- </tr>
- <tr>
- <td>- Pros: Original, innovative, fun, challenging, addictive, nostalgic</td>
- <td>- Pros: Improved, updated, expanded, diverse, engaging, rewarding</td>
- </tr>
- <tr>
- <td>- Cons: Repetitive, outdated, limited, easy, boring</td>
- <td>- Cons: Unoriginal, derivative, complex, hard, frustrating</td>
- </tr>
- </table>
- <p>Which one is better and why:</p>
- <p>Both Angry Birds Space and Angry Birds Space 2 APK are great games that offer hours of entertainment and enjoyment. However, if we have to choose one, we would say that Angry Birds Space 2 APK is better than Angry Birds Space. This is because Angry Birds Space 2 APK has more levels, characters, features, power-ups, graphics, and sounds than Angry Birds Space. It also has more variety, challenge, and replay value than Angry Birds Space. Therefore, we think that Angry Birds Space 2 APK is a superior game that deserves your attention and appreciation.</p>
- <h2>Rating and Review of Angry Birds Space 2 APK</h2>
- <p>Angry Birds Space 2 APK is a highly rated game by users and critics alike. It has an average rating of 4.5 out of 5 stars on various websites and platforms. It also has many positive reviews and feedback from users who praise the game for its gameplay, graphics, sound, features, etc.</p>
- <p>Here are some of the pros and cons of the game based on user feedback:</p>
- <ul>
- <li><b>Pros</b>: Fun, addictive, challenging, creative, beautiful, smooth, diverse, rewarding</li>
- <li><b>Cons</b>: Difficult, frustrating, buggy, laggy, expensive, ads, crashes</li>
- </ul>
- <p>Here are some of the strengths and weaknesses of the game based on gameplay, graphics, sound, etc.:</p>
- <ul>
- <li><b>Strengths</b>: The game has a unique and innovative gameplay that combines physics, puzzle, and strategy elements. It also has a stunning and realistic graphics style that creates a immersive and captivating space environment. The game also has a dynamic and varied soundtrack and sound effects that enhance the mood and atmosphere of the game.</li>
- <li><b>Weaknesses</b>: The game can be very difficult and frustrating at times, especially in the later levels. It can also have some bugs and glitches that affect the performance and stability of the game. The game also has some in-app purchases and ads that can be annoying and expensive.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Angry Birds Space 2 APK is a fantastic game that offers a lot of fun and challenge for puzzle lovers and Angry Birds fans. It has over 300 levels across 10 planets, new playable characters, special abilities, zero-gravity space adventures, trick shots, hidden bonus levels, daily missions, and more. It also has a beautiful and realistic graphics style, a dynamic and varied soundtrack and sound effects, and a simple and intuitive user interface.</p>
- <p>However, the game can also be very difficult and frustrating at times, especially in the later levels. It can also have some bugs and glitches that affect the performance and stability of the game. The game also has some in-app purchases and ads that can be annoying and expensive.</p>
- <p>Therefore, we recommend that you download and play Angry Birds Space 2 APK if you are looking for a fun and challenging puzzle game that will keep you entertained for hours. However, be prepared to face some difficulties and frustrations along the way. You can also compare it with Angry Birds Space to see which one you like better.</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Angry Birds Space 2 APK:</p>
- <ul>
- <li><b>Q1: Is Angry Birds Space 2 APK free?</b></li>
- <li><b>A1: Yes, Angry Birds Space 2 APK is free to download and play. However, it has some in-app purchases and ads that can enhance your gaming experience or remove some limitations.</b></li>
- <li><b>Q2: Is Angry Birds Space 2 APK safe?</b></li>
- <li><b>A2: Yes, Angry Birds Space 2 APK is safe to download and play. However, you should always download it from a trusted and reliable source. You should also scan the file with an antivirus app before installing it. You should also enable unknown sources on your device only when necessary.</b></li>
- <li><b>Q3: Is Angry Birds Space 2 APK compatible with my device?</b></li>
- <li><b>A3: Angry Birds Space 2 APK is compatible with most Android devices that run on Android 4.4 or higher. However, some devices may have different specifications or features that may affect the performance or compatibility of the game. You can check the minimum requirements of the game on the website or platform you are downloading from.</b></li>
- <li><b>Q4: How can I update Angry Birds Space 2 APK?</b></li>
- <li><b>A4: You can update Angry Birds Space 2 APK by downloading and installing the latest version of the game from the same source you downloaded it from. You can also check for updates within the game settings or menu.</b></li>
- <li><b>Q5: How can I contact the developers of Angry Birds Space 2 APK?</b></li>
- <li><b>A5: You can contact the developers of Angry Birds Space 2 APK by visiting their official website or social media pages. You can also send them an email or leave them a feedback or review on the website or platform you downloaded the game from.</b></li>
- </ul></p> 197e85843d<br />
- <br />
- <br />

spaces/1phancelerku/anime-remove-background/Download CarX Drift Racing 2 Mod Apk Obb Data for Android.md DELETED
@@ -1,139 +0,0 @@
- <br />
- <h1>CarX Drift Racing 2 OBB: A Guide for Android Users</h1>
- <p>If you are a fan of drifting games, you might have heard of CarX Drift Racing 2, one of the most popular and realistic drift racing games on Android. But did you know that there is a way to enhance your gaming experience even more? In this article, we will show you what CarX Drift Racing 2 OBB is, why you need it, how to download and install it on your device, and what are its features and benefits. Let's get started!</p>
- <h2>What is CarX Drift Racing 2 OBB and why do you need it?</h2>
- <p>OBB stands for Opaque Binary Blob, which is a type of file that contains additional data for some Android apps. These files are usually large in size and are stored in a separate folder on your device. They work together with APK files, which are the main files that install apps on your device.</p>
- <h2>carx drift racing 2 obb</h2><br /><p><b><b>Download</b> &#10040; <a href="https://jinyurl.com/2uNSc2">https://jinyurl.com/2uNSc2</a></b></p><br /><br />
- <p>CarX Drift Racing 2 is a game that requires an OBB file to run properly. This is because the game has high-quality graphics, sound effects, and animations that cannot fit in a single APK file. The OBB file contains all the extra data that makes the game look and sound amazing.</p>
- <p>By using the OBB file for CarX Drift Racing 2, you can enjoy faster loading times, smoother performance, and more content in the game. You can access more tracks, cars, skins, and body parts that are not available in the APK file alone. You can also save space on your device by deleting the APK file after installing the OBB file.</p>
- <h2>How to download and install CarX Drift Racing 2 OBB on your Android device?</h2>
- <p>Downloading and installing CarX Drift Racing 2 OBB on your Android device is easy if you follow these steps:</p>
- <ol>
- <li>Download the APK file and the OBB file from a reliable source. You can find them on websites like [APKPure](^1^) or [GameGuardian](^2^). Make sure you download the latest version of both files.</li>
- <li>Install the APK file on your device by tapping on it. You might need to enable unknown sources in your settings to do this.</li>
- <li>Locate the OBB file on your device using a file manager app. It should be in a zip or rar format.</li>
- <li>Extract the OBB file to a folder on your device. The folder should be named com.carxtech.carxdr2 and should be located in Android/obb.</li>
- <li>Copy the OBB file to the com.carxtech.carxdr2 folder. The OBB file should have a name like main.1234.com.carxtech.carxdr2.obb, where 1234 is the version number.</li>
- <li>Launch the game and enjoy!</li>
- </ol>
- <p>If you have any problems with the installation, you can check the troubleshooting section below.</p>
- <h2>What are the features and benefits of CarX Drift Racing 2 OBB?</h2>
- <p>CarX Drift Racing 2 is a game that will satisfy your need for speed and adrenaline. It is a realistic and immersive drift racing game that lets you customize your cars, compete with other players, and master your drifting skills. Here are some of the features and benefits of CarX Drift Racing 2 OBB:</p>
- <ul>
- <li>Realistic physics: The game uses a sophisticated physics engine that simulates the behavior of real cars on different surfaces and conditions. You can feel the weight, traction, and inertia of your car as you drift around corners and curves.</li>
- <li>Customizable cars: You can choose from over 80 cars from different brands and models. You can also modify your cars with various body parts, skins, wheels, tires, suspension, engine, and more. You can create your own unique style and show it off to other players.</li>
- <li>Online multiplayer: You can join online races and tournaments with other players from around the world. You can challenge your friends or random opponents in different modes, such as tandem drifting, sprint racing, or capture the flag. You can also join clubs and teams to cooperate and compete with other players.</li>
- <li>Career mode: You can progress through a series of events and missions that will test your drifting skills and earn you rewards. You can unlock new cars, tracks, parts, and achievements as you advance in your career. You can also improve your reputation and rank among other drifters.</li>
- </ul>
- <p>By using the OBB file for CarX Drift Racing 2, you can access more content and features that are not available in the APK file alone. Here is a table that compares the game size and content with and without the OBB file:</p>
- <p>carx drift racing 2 apk obb download<br />
- carx drift racing 2 mod apk obb<br />
- carx drift racing 2 android game obb<br />
- carx drift racing 2 apk xapk obb<br />
- carx drift racing 2 apk combo obb<br />
- carx drift racing 2 apk data obb<br />
- carx drift racing 2 apk full obb<br />
- carx drift racing 2 apk latest version obb<br />
- carx drift racing 2 apk offline obb<br />
- carx drift racing 2 apk pure obb<br />
- carx drift racing 2 apk revdl obb<br />
- carx drift racing 2 apk unlimited money obb<br />
- carx drift racing 2 apk update obb<br />
- carx drift racing 2 game guardian obb<br />
- carx drift racing 2 game for android obb<br />
- carx drift racing 2 game for pc obb<br />
- carx drift racing 2 game free download obb<br />
- carx drift racing 2 game online obb<br />
- carx drift racing 2 game play store obb<br />
- carx drift racing 2 game review obb<br />
- carx drift racing 2 hack apk obb<br />
- carx drift racing 2 hack mod obb<br />
- carx drift racing 2 hack version obb<br />
- carx drift racing 2 install xapk obb<br />
- carx drift racing 2 mod menu obb<br />
- carx drift racing 2 mod money obb<br />
- carx drift racing 2 mod unlocked obb<br />
- carx drift racing 2 new update obb<br />
- carx drift racing 2 old version obb<br />
- carx drift racing 2 original apk obb<br />
- carx drift racing 2 premium apk obb<br />
- carx drift racing 2 pro apk obb<br />
- carx drift racing 2 rexdl apk obb<br />
- carx drift racing 2 sequel apk obb<br />
- carx drift racing 2 tips and tricks obb<br />
- carx drift racing 2 unlimited coins obb<br />
- carx drift racing 2 unlimited gold obb<br />
- carx drift racing 2 v1.1.0 apk obb<br />
- carx drift racing 2 v1.26.1 apk obb<br />
- how to download carx drift racing 2 with obb file <br />
- how to install carx drift racing 2 with obb file <br />
- how to play carx drift racing 2 with obb file <br />
- how to update carx drift racing 2 with obb file <br />
- what is the size of carx drift racing 2 with obb file <br />
- where to download carx drift racing 2 with obb file</p>
- <table>
- <tr>
- <th>Game size</th>
- <th>Tracks</th>
- <th>Cars</th>
- <th>Skins</th>
- <th>Body parts</th>
- </tr>
- <tr>
- <td>Without OBB file</td>
- <td>10</td>
- <td>20</td>
- <td>50</td>
- <td>100</td>
- </tr>
- <tr>
- <td>With OBB file</td>
- <td>30</td>
- <td>80</td>
- <td>200</td>
- <td>500</td>
- </tr>
- </table>
- <h3>Conclusion</h3>
- <p>In conclusion, CarX Drift Racing 2 OBB is a must-have for any drift racing fan who wants to enjoy the full potential of the game. By downloading and installing the OBB file on your Android device, you can experience faster loading times, smoother performance, and more content in the game. You can also save space on your device by deleting the APK file after installing the OBB file.</p>
- <p>If you are ready to take your drifting skills to the next level, download CarX Drift Racing 2 OBB today and join the millions of players who are already hooked on this amazing game. You will not regret it!</p>
- <h4>FAQs</h4>
- <p>Here are some frequently asked questions about CarX Drift Racing 2 OBB:</p>
- <ol>
- <li><b>How do I update CarX Drift Racing 2 OBB?</b></li>
- <p>To update CarX Drift Racing 2 OBB, you need to download the latest version of both the APK file and the OBB file from a reliable source. Then, you need to install the APK file on your device and copy the OBB file to the com.carxtech.carxdr2 folder on your device. You can overwrite the old files with the new ones.</p>
- <li><b>How do I fix errors or crashes with CarX Drift Racing 2 OBB?</b></li>
- <p>If you encounter any errors or crashes with CarX Drift Racing 2 OBB, you can try these solutions:</p>
- <ul>
- <li>Make sure you have enough storage space on your device.</li>
- <li>Make sure you have a stable internet connection.</li>
- <li>Make sure you have downloaded the correct version of both the APK file and the OBB file for your device.</li>
- <li>Make sure you have copied the OBB file to the correct folder on your device.</li>
- <li>Make sure you have granted the necessary permissions to the game, such as storage, network, and location.</li>
- <li>Clear the cache and data of the game from your settings.</li>
- <li>Restart your device and try again.</li>
- </ul>
- <p>If none of these solutions work, you can contact the developer of the game for further assistance.</p>
- <li><b>How do I uninstall CarX Drift Racing 2 OBB?</b></li>
- <p>To uninstall CarX Drift Racing 2 OBB, you need to delete both the APK file and the OBB file from your device. You can do this by following these steps:</p>
- <ol>
- <li>Go to your settings and find the app manager or application list.</li>
- <li>Find CarX Drift Racing 2 and tap on it.</li>
- <li>Tap on uninstall and confirm your choice.</li>
- <li>Go to your file manager app and find the Android/obb folder.</li>
- <li>Delete the com.carxtech.carxdr2 folder and its contents.</li>
- </ol>
- <p>You have successfully uninstalled CarX Drift Racing 2 OBB from your device.</p>
- <li><b>Is CarX Drift Racing 2 OBB safe to use?</b></li>
- <p>CarX Drift Racing 2 OBB is safe to use as long as you download it from a trusted source. You should avoid downloading it from unknown or suspicious websites that might contain malware or viruses. You should also scan the files with an antivirus app before installing them on your device. You should also be careful not to share your personal or financial information with any third-party apps or websites that claim to offer cheats or hacks for the game.</p>
- <li><b>What are some tips and tricks for CarX Drift Racing 2 OBB?</b></li>
- <p>Here are some tips and tricks that can help you improve your drifting skills and enjoy the game more:</p>
- <ul>
- <li>Practice on different tracks and cars to learn how they handle and react to different situations.</li>
- <li>Adjust your settings and controls to suit your preferences and comfort level.</li>
- <li>Use the tuning feature to optimize your car's performance and appearance.</li>
- <li>Watch replays and ghost races to learn from other players and improve your techniques.</li>
- <li>Join clubs and teams to chat, cooperate, and compete with other players.</li>
- </ul></p> 401be4b1e0<br />
- <br />
- <br />

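A side note on the OBB convention described in the deleted guide above: Android expansion files live under Android/obb/<package>/ and are named main.<version>.<package>.obb. A small, hypothetical Python sketch of that layout (the version number 1234 is the article's placeholder, not a real release):

from pathlib import Path

# Build the expected expansion-file path for a package and version,
# following the main.<version>.<package>.obb naming used in the guide above.
def obb_path(storage_root: str, package: str, version: int) -> Path:
    return Path(storage_root) / "Android" / "obb" / package / f"main.{version}.{package}.obb"

print(obb_path("/sdcard", "com.carxtech.carxdr2", 1234))
# /sdcard/Android/obb/com.carxtech.carxdr2/main.1234.com.carxtech.carxdr2.obb
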
spaces/1phancelerku/anime-remove-background/Euro Truck Simulator 3 Europa The Ultimate Truck Driving Game for Android.md DELETED
@@ -1,114 +0,0 @@
1
- <br />
2
- <h1>European Truck Simulator 3: A Review of the Best Truck Driving Game</h1>
3
- <p>Do you love driving trucks and delivering cargo across different countries? Do you want to experience the thrill and challenge of being a professional trucker? If yes, then you should try European Truck Simulator 3, the latest and most realistic truck simulation game ever made. In this article, we will review ETS3 and tell you why it is the best truck driving game you can play. We will also show you how to download and install ETS3 APK on your Android device, so you can enjoy this amazing game anytime, anywhere.</p>
4
- <h2>european truck simulator 3 apk download</h2><br /><p><b><b>Download</b> &#10022;&#10022;&#10022; <a href="https://jinyurl.com/2uNOeX">https://jinyurl.com/2uNOeX</a></b></p><br /><br />
5
- <h2>What is European Truck Simulator 3?</h2>
6
- <p>Euro Truck Simulator 3 (ETS3) is a video game developed by SCS Software, the same studio that created the popular Euro Truck Simulator 2 (ETS2). ETS3 is the third instalment in the Euro Truck Simulator series, which started in 2008. ETS3 is expected to be released in 2028 for PC, PS5, and Xbox consoles, according to some rumors and news sources . However, there is no official confirmation or announcement from the developers yet.</p>
7
- <h3>The gameplay and features of ETS3</h3>
8
- <p>ETS3 is a truck simulation game that lets you drive various trucks and trailers across Europe. You can choose from different truck models, chassis configurations, customizations, and cosmetics. You can also select your job and deliver your cargo to different destinations. You have to follow the traffic rules, manage your fuel, damage, fatigue, and other factors that affect your driving performance. You can also interact with other drivers, hire employees, buy garages, and expand your business.</p>
9
- <p>ETS3 features a realistic and immersive truck physics system that makes driving feel like real life. You can feel the weight, speed, acceleration, braking, steering, suspension, and other aspects of your truck. You can also hear the authentic engine sounds, horn sounds, tire sounds, and other sound effects that add to the atmosphere. You can also adjust the camera angles, mirrors, lights, indicators, wipers, cruise control, and other controls to suit your preference.</p>
10
- <p>ETS3 also features a vast and detailed map of Europe that covers dozens of cities and countries. You can travel across highways, country roads, urban roads, mountain roads, tunnels, bridges, tolls, ferries, and other types of roads. You can see the landmarks, buildings, landscapes, weather conditions, day and night cycle, seasons, and other elements that make each location unique. You can also encounter different types of traffic vehicles, pedestrians, animals, events, accidents, roadworks, police patrols, tolls, and other situations that make your journey more dynamic and unpredictable.</p>
11
- <h3>The system requirements and platforms of ETS3</h3>
12
- <p>ETS3 is expected to have higher system requirements than ETS2 due to its improved graphics and physics engine. According to some sources, these are the minimum and recommended system requirements for ETS3 on PC:</p>
59
- <table>
60
- <tr>
61
- <th>Minimum</th>
62
- <th>Recommended</th>
63
- </tr>
64
- <tr>
65
- <td>OS: Windows XP or Windows Vista<br>CPU: Processor 2.4 GHz Intel Pentium 4 or equivalent<br>GPU: 128 MB video card: GeForce 4 (not MX!) or better, ATI Radeon 8500 or better<br>RAM: 512 MB RAM (1 GB on Windows Vista)<br>HDD: 600 MB of free hard drive space<br>DirectX: DirectX 9.0</td>
66
- <td>OS: Windows XP or Windows Vista<br>CPU: Processor 3.0 GHz Intel Pentium 4 or equivalent<br>GPU: 256 MB video card: GeForce 6 or better, ATI Radeon 9800 or better<br>RAM: 1 GB RAM (2 GB on Windows Vista)<br>HDD: 1.5 GB of free hard drive space<br>DirectX: DirectX 9.0c</td>
67
- </tr>
68
- </table>
69
- <p>ETS3 is also expected to be released for PS5 and Xbox consoles, according to some rumors and news sources. However, there is no official confirmation or announcement from the developers yet. The console versions of ETS3 may have different features and gameplay modes than the PC version, such as online multiplayer, controller support, achievements, and other options.</p>
70
- <h2>Why should you play European Truck Simulator 3?</h2>
71
- <p>If you are a fan of truck driving games, then you should definitely play ETS3. ETS3 is the best truck simulation game ever made, and it offers many reasons to play it. Here are some of the main reasons why you should play ETS3:</p>
72
- <h3>The realistic and immersive truck driving experience</h3>
73
- <p>ETS3 gives you the opportunity to experience what it is like to be a real truck driver. You can drive various trucks and trailers across Europe, following the traffic rules, managing your fuel, damage, fatigue, and other factors that affect your driving performance. You can also interact with other drivers, hire employees, buy garages, and expand your business. You can feel the realistic and immersive truck physics system that makes driving feel like real life. You can also hear the authentic engine sounds, horn sounds, tire sounds, and other sound effects that add to the atmosphere. You can also adjust the camera angles, mirrors, lights, indicators, wipers, cruise control, and other controls to suit your preference.</p>
74
- <h3>The variety and customization of trucks and trailers</h3>
75
- <p>ETS3 lets you choose from different truck models, chassis configurations, customizations, and cosmetics. You can select from over 50 licensed truck brands, such as Volvo, Scania, Mercedes-Benz, MAN, DAF, Renault, Iveco, and more. You can also customize your truck with different engines, transmissions, axles, suspensions, tires, colors, paint jobs, stickers, accessories, and more. You can also choose from different types of trailers, such as flatbeds, curtainsiders, refrigerated, tankers, low loaders, car carriers, and more. You can also customize your trailer with different cargoes, weights, lengths, widths, heights, colors, paint jobs, stickers, accessories, and more.</p>
76
- <h3>The exploration and discovery of Europe</h3>
77
- <p>ETS3 lets you explore and discover Europe in a way that no other game can. You can travel across highways, country roads, urban roads, mountain roads, tunnels, bridges, tolls, ferries, and other types of roads. You can see the landmarks, buildings, landscapes, weather conditions, day and night cycle, seasons, and other elements that make each location unique. You can also encounter different types of traffic vehicles, pedestrians, animals, events, accidents, roadworks, police patrols, tolls, and other situations that make your journey more dynamic and unpredictable. You can visit dozens of cities and countries in Europe, such as London, Paris, Berlin, Rome, Madrid, Amsterdam, Stockholm, Warsaw, Prague, Vienna, Zurich, Lisbon, Dublin, and more.</p>
78
- <h2>How to download and install European Truck Simulator 3 APK?</h2>
79
- <p>If you want to play ETS3 on your Android device, you will need to download and install ETS3 APK. ETS3 APK is a file that contains the game data and allows you to install it on your device without using the Google Play Store or any other app store. However, downloading and installing ETS3 APK is not as simple as it sounds. There are some risks and challenges involved in this process. Here are some of the steps and precautions you need to follow to download and install ETS3 APK safely and successfully:</p>
80
- <h3>The steps to download and install ETS3 APK on Android devices</h3>
81
- <ol>
82
- <li>Find a reliable and trustworthy source for ETS3 APK. There are many websites that claim to offer ETS3 APK for free download, but not all of them are safe and legitimate. Some of them may contain malware or viruses that can harm your device or steal your personal information. Some of them may also offer fake or outdated versions of ETS3 APK that may not work properly or at all. Therefore, you need to be careful and do some research before downloading ETS3 APK from any source.</li>
83
- <li>Download ETS3 APK file to your device. Once you have found a reliable and trustworthy source for ETS3 APK, you can download the file to your device. You may need to enable the option to download files from unknown sources in your device settings. You may also need to grant some permissions to the source website or app to access your device storage. You should also check the file size and name before downloading, and make sure they match the expected values (a scripted version of this check is sketched after this list).</li>
84
- <li>Install ETS3 APK on your device. After downloading ETS3 APK file to your device, you can install it by tapping on it. You may need to confirm the installation and grant some permissions to the game app to access your device features. You should also read the terms and conditions and privacy policy of the game app before installing it. You may also need to verify your device compatibility and security before installing it.</li>
85
- <li>Launch ETS3 APK on your device. After installing ETS3 APK on your device, you can launch it by tapping on its icon. You may need to sign in with your account or create a new one to access the game features. You may also need to download some additional data or updates for the game to run smoothly. You should also check the game settings and adjust them according to your preference and device performance.</li>
86
- </ol>
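- <p>The file checks from step 2 can be automated. The sketch below is illustrative only: the file name, expected size, and checksum are placeholders you would replace with the values published by your trusted source.</p>
- <pre><code>import hashlib
- import os
-
- APK_PATH = "ets3.apk"        # hypothetical file name
- EXPECTED_SIZE = 123456789    # bytes, as listed by the source (placeholder)
- EXPECTED_SHA256 = "..."      # checksum published by the source (placeholder)
-
- assert os.path.getsize(APK_PATH) == EXPECTED_SIZE, "file size mismatch"
-
- sha256 = hashlib.sha256()
- with open(APK_PATH, "rb") as f:
-     for chunk in iter(lambda: f.read(8192), b""):
-         sha256.update(chunk)
- assert sha256.hexdigest() == EXPECTED_SHA256, "checksum mismatch"
- </code></pre>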
87
- <h3>The precautions and tips to avoid malware and viruses</h3>
88
- <p>Downloading and installing ETS3 APK on your Android device can be risky and challenging, as there are many potential threats and problems that can occur. Here are some of the precautions and tips you need to follow to avoid malware and viruses when downloading and installing ETS3 APK:</p>
89
- <ul>
90
- <li>Use a reputable antivirus or security app on your device. You should always have a reliable and updated antivirus or security app on your device that can scan and protect your device from malware and viruses. You should also run a full scan of your device before and after downloading and installing ETS3 APK, and delete any suspicious or harmful files or apps.</li>
91
- <li>Use a secure and stable internet connection. You should always use a secure and stable internet connection when downloading and installing ETS3 APK, as a weak or unstable connection can cause errors or interruptions in the process. You should also avoid using public or unsecured Wi-Fi networks, as they can expose your device to hackers or cyberattacks.</li>
92
- <li>Use a backup or recovery tool on your device. You should always have a backup or recovery tool on your device that can save and restore your data and settings in case of any damage or loss. You should also backup your data and settings before and after downloading and installing ETS3 APK, and restore them if needed.</li>
93
- <li>Use a trusted and verified source for ETS3 APK. You should always use a trusted and verified source for ETS3 APK, as an untrusted or unverified source can provide fake or infected files or apps that can harm your device or steal your personal information. You should also check the reviews, ratings, comments, feedback, and reputation of the source before downloading and installing ETS3 APK, and avoid any source that has negative or suspicious signs.</li>
94
- </ul>
95
- <h2>Conclusion</h2>
96
- <p>Euro Truck Simulator 3 (ETS3) is a truck simulation game that lets you drive various trucks and trailers across Europe. It is the best truck driving game ever made, as it offers a realistic and immersive truck driving experience, a variety and customization of trucks and trailers, and an exploration and discovery of Europe. It is expected to be released in 2028 for PC, PS5, and Xbox consoles, according to some rumors and news sources. However, there is no official confirmation or announcement from the developers yet.</p>
97
- <p>If you want to play ETS3 on your Android device, you will need to download and install ETS3 APK. ETS3 APK is a file that contains the game data and allows you to install it on your device without using the Google Play Store or any other app store. However, downloading and installing ETS3 APK is not as simple as it sounds. There are some risks and challenges involved in this process. You will need to follow some steps and precautions to download and install ETS3 APK safely and successfully.</p>
98
- <p>We hope this article has helped you understand what ETS3 is, why you should play it, and how to download and install ETS3 APK on your Android device. If you have any questions, comments, or feedback, please feel free to share them with us. We would love to hear from you. Thank you for reading and happy trucking!</p>
99
- <h2>FAQs</h2>
100
- <p>Here are some of the frequently asked questions (FAQs) about ETS3 and ETS3 APK:</p>
101
- <ol>
102
- <li><b>What is the difference between ETS3 and ETS2?</b></li>
103
- <p>ETS3 and ETS2 are both truck simulation games developed by SCS Software, but they have some differences. ETS3 is the latest and most realistic truck simulation game ever made, while ETS2 is the previous and most popular truck simulation game in the series. ETS3 has improved graphics and physics engine, more truck models and customizations, more trailer types and cargoes, more cities and countries, more road types and situations, and more features and gameplay modes than ETS2. However, ETS3 is not yet released, while ETS2 is available for PC, Mac, Linux, PS4, and Xbox One.</p>
104
- <li><b>Is ETS3 APK safe and legal to download and install?</b></li>
105
- <p>ETS3 APK is not safe or legal to download and install on your Android device. ETS3 APK is a file that contains the game data and allows you to install it on your device without using the Google Play Store or any other app store. However, this file is not authorized or verified by the developers or the app stores, and it may contain malware or viruses that can harm your device or steal your personal information. It may also violate the terms and conditions and privacy policy of the game app and the app stores, and it may result in legal actions or penalties. Therefore, you should avoid downloading and installing ETS3 APK on your Android device.</p>
106
- <li><b>How much does ETS3 cost and where can I buy it?</b></li>
107
- <p>ETS3 is expected to cost around $40-$60 USD for PC, PS5, and Xbox consoles, according to some rumors and news sources. However, there is no official confirmation or announcement from the developers yet. You can buy ETS3 from the official website of SCS Software or from other online or offline retailers that sell video games. However, you will have to wait until ETS3 is released, which may take a few years.</p>
108
- <li><b>Can I play ETS3 online with other players?</b></li>
109
- <p>ETS3 may have an online multiplayer mode that allows you to play with other players around the world. You may be able to join or create a convoy of trucks, chat with other drivers, compete in races or challenges, cooperate in missions or deliveries, share your customizations or screenshots, and more. However, there is no official confirmation or announcement from the developers yet about the online multiplayer mode of ETS3.</p>
110
- <li><b>Can I mod ETS3 or use mods from ETS2?</b></li>
111
- <p>ETS3 may have a modding support that allows you to create or use mods for the game. Mods are modifications that change or add something to the game, such as new trucks, trailers, cargoes, maps, roads, traffic, weather, sounds, graphics, gameplay features, and more. However, there is no official confirmation or announcement from the developers yet about the modding support of ETS3. You may not be able to use mods from ETS2 for ETS3, as they may not be compatible or updated for the new game.</p>
112
- </ol>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py DELETED
@@ -1,548 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import inspect
17
- from typing import Callable, List, Optional, Union
18
-
19
- import numpy as np
20
- import paddle
21
- import PIL
22
- from packaging import version
23
-
24
- from paddlenlp.transformers import CLIPFeatureExtractor, XLMRobertaTokenizer
25
-
26
- from ...configuration_utils import FrozenDict
27
- from ...models import AutoencoderKL, UNet2DConditionModel
28
- from ...pipeline_utils import DiffusionPipeline
29
- from ...schedulers import (
30
- DDIMScheduler,
31
- DPMSolverMultistepScheduler,
32
- EulerAncestralDiscreteScheduler,
33
- EulerDiscreteScheduler,
34
- LMSDiscreteScheduler,
35
- PNDMScheduler,
36
- )
37
- from ...utils import PIL_INTERPOLATION, deprecate, logging
38
- from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
39
- from . import AltDiffusionPipelineOutput, RobertaSeriesModelWithTransformation
40
-
41
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
42
-
43
-
44
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
45
- def preprocess(image):
46
- if isinstance(image, paddle.Tensor):
47
- return image
48
- elif isinstance(image, PIL.Image.Image):
49
- image = [image]
50
-
51
- if isinstance(image[0], PIL.Image.Image):
52
- w, h = image[0].size
53
- w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
54
-
55
- image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
56
- image = np.concatenate(image, axis=0)
57
- image = np.array(image).astype(np.float32) / 255.0
58
- image = image.transpose(0, 3, 1, 2)
59
- image = 2.0 * image - 1.0
60
- image = paddle.to_tensor(image)
61
- elif isinstance(image[0], paddle.Tensor):
62
- image = paddle.concat(image, axis=0)
63
- return image
64
-
65
-
66
- class AltDiffusionImg2ImgPipeline(DiffusionPipeline):
67
- r"""
68
- Pipeline for text-guided image to image generation using Alt Diffusion.
69
-
70
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
71
- library implements for all the pipelines (such as downloading or saving etc.)
72
-
73
- Args:
74
- vae ([`AutoencoderKL`]):
75
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
76
- text_encoder ([`RobertaSeriesModelWithTransformation`]):
77
- Frozen text-encoder. Alt Diffusion uses the text portion of
78
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.RobertaSeriesModelWithTransformation),
79
- specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
80
- tokenizer (`XLMRobertaTokenizer`):
81
- Tokenizer of class
82
- [XLMRobertaTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.XLMRobertaTokenizer).
83
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
84
- scheduler ([`SchedulerMixin`]):
85
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
86
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
87
- safety_checker ([`StableDiffusionSafetyChecker`]):
88
- Classification module that estimates whether generated images could be considered offensive or harmful.
89
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
90
- feature_extractor ([`CLIPFeatureExtractor`]):
91
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
92
- """
93
- _optional_components = ["safety_checker", "feature_extractor"]
94
-
95
- def __init__(
96
- self,
97
- vae: AutoencoderKL,
98
- text_encoder: RobertaSeriesModelWithTransformation,
99
- tokenizer: XLMRobertaTokenizer,
100
- unet: UNet2DConditionModel,
101
- scheduler: Union[
102
- DDIMScheduler,
103
- PNDMScheduler,
104
- LMSDiscreteScheduler,
105
- EulerDiscreteScheduler,
106
- EulerAncestralDiscreteScheduler,
107
- DPMSolverMultistepScheduler,
108
- ],
109
- safety_checker: StableDiffusionSafetyChecker,
110
- feature_extractor: CLIPFeatureExtractor,
111
- requires_safety_checker: bool = True,
112
- ):
113
- super().__init__()
114
-
115
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
116
- deprecation_message = (
117
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
118
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
119
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
120
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
121
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
122
- " file"
123
- )
124
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
125
- new_config = dict(scheduler.config)
126
- new_config["steps_offset"] = 1
127
- scheduler._internal_dict = FrozenDict(new_config)
128
-
129
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
130
- deprecation_message = (
131
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
132
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
133
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
134
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
135
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
136
- )
137
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
138
- new_config = dict(scheduler.config)
139
- new_config["clip_sample"] = False
140
- scheduler._internal_dict = FrozenDict(new_config)
141
-
142
- if safety_checker is None and requires_safety_checker:
143
- logger.warning(
144
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
145
- " that you abide to the conditions of the Alt Diffusion license and do not expose unfiltered"
146
- " results in services or applications open to the public. PaddleNLP team, diffusers team and Hugging Face"
147
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
148
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
149
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
150
- )
151
- if safety_checker is not None and feature_extractor is None:
152
- raise ValueError(
153
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
154
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
155
- )
156
-
157
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_ppdiffusers_version") and version.parse(
158
- version.parse(unet.config._ppdiffusers_version).base_version
159
- ) < version.parse("0.9.0.dev0")
160
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
161
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
162
- deprecation_message = (
163
- "The configuration file of the unet has set the default `sample_size` to smaller than"
164
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
165
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
166
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
167
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
168
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
169
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
170
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
171
- " the `unet/config.json` file"
172
- )
173
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
174
- new_config = dict(unet.config)
175
- new_config["sample_size"] = 64
176
- unet._internal_dict = FrozenDict(new_config)
177
-
178
- self.register_modules(
179
- vae=vae,
180
- text_encoder=text_encoder,
181
- tokenizer=tokenizer,
182
- unet=unet,
183
- scheduler=scheduler,
184
- safety_checker=safety_checker,
185
- feature_extractor=feature_extractor,
186
- )
187
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
188
- self.register_to_config(requires_safety_checker=requires_safety_checker)
189
-
190
- def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
191
- r"""
192
- Encodes the prompt into text encoder hidden states.
193
-
194
- Args:
195
- prompt (`str` or `list(int)`):
196
- prompt to be encoded
197
- num_images_per_prompt (`int`):
198
- number of images that should be generated per prompt
199
- do_classifier_free_guidance (`bool`):
200
- whether to use classifier free guidance or not
201
- negative_prompt (`str` or `List[str]`):
202
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
203
- if `guidance_scale` is less than `1`).
204
- """
205
- batch_size = len(prompt) if isinstance(prompt, list) else 1
206
-
207
- text_inputs = self.tokenizer(
208
- prompt,
209
- padding="max_length",
210
- max_length=self.tokenizer.model_max_length,
211
- truncation=True,
212
- return_tensors="pd",
213
- )
214
- text_input_ids = text_inputs.input_ids
215
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pd").input_ids
216
-
217
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not paddle.equal_all(
218
- text_input_ids, untruncated_ids
219
- ):
220
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
221
- logger.warning(
222
- "The following part of your input was truncated because XLM-Roberta can only handle sequences up to"
223
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
224
- )
225
-
226
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
227
- attention_mask = text_inputs.attention_mask
228
- else:
229
- attention_mask = None
230
-
231
- text_embeddings = self.text_encoder(
232
- text_input_ids,
233
- attention_mask=attention_mask,
234
- )
235
- text_embeddings = text_embeddings[0]
236
-
237
- # duplicate text embeddings for each generation per prompt, using mps friendly method
238
- bs_embed, seq_len, _ = text_embeddings.shape
239
- text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
240
- text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
241
-
242
- # get unconditional embeddings for classifier free guidance
243
- if do_classifier_free_guidance:
244
- uncond_tokens: List[str]
245
- if negative_prompt is None:
246
- uncond_tokens = [""] * batch_size
247
- elif type(prompt) is not type(negative_prompt):
248
- raise TypeError(
249
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
250
- f" {type(prompt)}."
251
- )
252
- elif isinstance(negative_prompt, str):
253
- uncond_tokens = [negative_prompt]
254
- elif batch_size != len(negative_prompt):
255
- raise ValueError(
256
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
257
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
258
- " the batch size of `prompt`."
259
- )
260
- else:
261
- uncond_tokens = negative_prompt
262
-
263
- max_length = text_input_ids.shape[-1]
264
- uncond_input = self.tokenizer(
265
- uncond_tokens,
266
- padding="max_length",
267
- max_length=max_length,
268
- truncation=True,
269
- return_tensors="pd",
270
- )
271
-
272
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
273
- attention_mask = uncond_input.attention_mask
274
- else:
275
- attention_mask = None
276
-
277
- uncond_embeddings = self.text_encoder(
278
- uncond_input.input_ids,
279
- attention_mask=attention_mask,
280
- )
281
- uncond_embeddings = uncond_embeddings[0]
282
-
283
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
284
- seq_len = uncond_embeddings.shape[1]
285
- uncond_embeddings = uncond_embeddings.tile([1, num_images_per_prompt, 1])
286
- uncond_embeddings = uncond_embeddings.reshape([batch_size * num_images_per_prompt, seq_len, -1])
287
-
288
- # For classifier free guidance, we need to do two forward passes.
289
- # Here we concatenate the unconditional and text embeddings into a single batch
290
- # to avoid doing two forward passes
291
- text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])
292
-
293
- return text_embeddings
294
-
295
- def run_safety_checker(self, image, dtype):
296
- if self.safety_checker is not None:
297
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pd")
298
- image, has_nsfw_concept = self.safety_checker(
299
- images=image, clip_input=safety_checker_input.pixel_values.cast(dtype)
300
- )
301
- else:
302
- has_nsfw_concept = None
303
- return image, has_nsfw_concept
304
-
305
- def decode_latents(self, latents):
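- # 0.18215 is the fixed VAE latent scaling factor used by Stable Diffusion-family models; decoding first undoes it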
306
- latents = 1 / 0.18215 * latents
307
- image = self.vae.decode(latents).sample
308
- image = (image / 2 + 0.5).clip(0, 1)
309
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
310
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
311
- return image
312
-
313
- def prepare_extra_step_kwargs(self, generator, eta):
314
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
315
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
316
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
317
- # and should be between [0, 1]
318
-
319
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
320
- extra_step_kwargs = {}
321
- if accepts_eta:
322
- extra_step_kwargs["eta"] = eta
323
-
324
- # check if the scheduler accepts generator
325
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
326
- if accepts_generator:
327
- extra_step_kwargs["generator"] = generator
328
- return extra_step_kwargs
329
-
330
- def check_inputs(self, prompt, strength, callback_steps):
331
- if not isinstance(prompt, str) and not isinstance(prompt, list):
332
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
333
-
334
- if strength < 0 or strength > 1:
335
- raise ValueError(f"The value of strength should in [1.0, 1.0] but is {strength}")
336
-
337
- if (callback_steps is None) or (
338
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
339
- ):
340
- raise ValueError(
341
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
342
- f" {type(callback_steps)}."
343
- )
344
-
345
- def get_timesteps(self, num_inference_steps, strength):
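- # e.g. strength=0.8 with num_inference_steps=50 keeps only the last 40 scheduler timesteps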
346
- # get the original timestep using init_timestep
347
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
348
-
349
- t_start = max(num_inference_steps - init_timestep, 0)
350
- timesteps = self.scheduler.timesteps[t_start:]
351
-
352
- return timesteps, num_inference_steps - t_start
353
-
354
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, generator=None):
355
- image = image.cast(dtype=dtype)
356
- batch_size = batch_size * num_images_per_prompt
357
- if isinstance(generator, list) and len(generator) != batch_size:
358
- raise ValueError(
359
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
360
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
361
- )
362
-
363
- if isinstance(generator, list):
364
- init_latents = [
365
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
366
- ]
367
- init_latents = paddle.concat(init_latents, axis=0)
368
- else:
369
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
370
-
371
- init_latents = 0.18215 * init_latents
372
-
373
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
374
- # expand init_latents for batch_size
375
- deprecation_message = (
376
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
377
- " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
378
- " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
379
- " your script to pass as many initial images as text prompts to suppress this warning."
380
- )
381
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
382
- additional_image_per_prompt = batch_size // init_latents.shape[0]
383
- init_latents = paddle.concat([init_latents] * additional_image_per_prompt, axis=0)
384
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
385
- raise ValueError(
386
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
387
- )
388
- else:
389
- init_latents = paddle.concat([init_latents], axis=0)
390
-
391
- shape = init_latents.shape
392
- if isinstance(generator, list):
393
- shape = [
394
- 1,
395
- ] + shape[1:]
396
- noise = [paddle.randn(shape, generator=generator[i], dtype=dtype) for i in range(batch_size)]
397
- noise = paddle.concat(noise, axis=0)
398
- else:
399
- noise = paddle.randn(shape, generator=generator, dtype=dtype)
400
-
401
- # get latents
402
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
403
- latents = init_latents
404
-
405
- return latents
406
-
407
- @paddle.no_grad()
408
- def __call__(
409
- self,
410
- prompt: Union[str, List[str]],
411
- image: Union[paddle.Tensor, PIL.Image.Image] = None,
412
- strength: float = 0.8,
413
- num_inference_steps: Optional[int] = 50,
414
- guidance_scale: Optional[float] = 7.5,
415
- negative_prompt: Optional[Union[str, List[str]]] = None,
416
- num_images_per_prompt: Optional[int] = 1,
417
- eta: Optional[float] = 0.0,
418
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
419
- output_type: Optional[str] = "pil",
420
- return_dict: bool = True,
421
- callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
422
- callback_steps: Optional[int] = 1,
423
- ):
424
- r"""
425
- Function invoked when calling the pipeline for generation.
426
-
427
- Args:
428
- prompt (`str` or `List[str]`):
429
- The prompt or prompts to guide the image generation.
430
- image (`paddle.Tensor` or `PIL.Image.Image`):
431
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
432
- process.
433
- strength (`float`, *optional*, defaults to 0.8):
434
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
435
- `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
436
- number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
437
- noise will be maximum and the denoising process will run for the full number of iterations specified in
438
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
439
- num_inference_steps (`int`, *optional*, defaults to 50):
440
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
441
- expense of slower inference. This parameter will be modulated by `strength`.
442
- guidance_scale (`float`, *optional*, defaults to 7.5):
443
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
444
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
445
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
446
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
447
- usually at the expense of lower image quality.
448
- negative_prompt (`str` or `List[str]`, *optional*):
449
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
450
- if `guidance_scale` is less than `1`).
451
- num_images_per_prompt (`int`, *optional*, defaults to 1):
452
- The number of images to generate per prompt.
453
- eta (`float`, *optional*, defaults to 0.0):
454
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
455
- [`schedulers.DDIMScheduler`], will be ignored for others.
456
- generator (`paddle.Generator`, *optional*):
457
- One or a list of paddle generator(s) to make generation deterministic.
458
- output_type (`str`, *optional*, defaults to `"pil"`):
459
- The output format of the generate image. Choose between
460
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
461
- return_dict (`bool`, *optional*, defaults to `True`):
462
- Whether or not to return a [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] instead of a
463
- plain tuple.
464
- callback (`Callable`, *optional*):
465
- A function that will be called every `callback_steps` steps during inference. The function will be
466
- called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
467
- callback_steps (`int`, *optional*, defaults to 1):
468
- The frequency at which the `callback` function will be called. If not specified, the callback will be
469
- called at every step.
470
-
471
- Returns:
472
- [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] or `tuple`:
473
- [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
474
- When returning a tuple, the first element is a list with the generated images, and the second element is a
475
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
476
- (nsfw) content, according to the `safety_checker`.
477
- """
478
- # 1. Check inputs
479
- self.check_inputs(prompt, strength, callback_steps)
480
-
481
- # 2. Define call parameters
482
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
483
-
484
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
485
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
486
- # corresponds to doing no classifier free guidance.
487
- do_classifier_free_guidance = guidance_scale > 1.0
488
-
489
- # 3. Encode input prompt
490
- text_embeddings = self._encode_prompt(
491
- prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
492
- )
493
-
494
- # 4. Preprocess image
495
- image = preprocess(image)
496
-
497
- # 5. set timesteps
498
- self.scheduler.set_timesteps(num_inference_steps)
499
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)
500
- latent_timestep = timesteps[:1].tile([batch_size * num_images_per_prompt])
501
-
502
- # 6. Prepare latent variables
503
- latents = self.prepare_latents(
504
- image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, generator
505
- )
506
-
507
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
508
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
509
-
510
- # 8. Denoising loop
511
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
512
- with self.progress_bar(total=num_inference_steps) as progress_bar:
513
- for i, t in enumerate(timesteps):
514
- # expand the latents if we are doing classifier free guidance
515
- latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
516
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
517
-
518
- # predict the noise residual
519
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
520
-
521
- # perform guidance
522
- if do_classifier_free_guidance:
523
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
524
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
525
-
526
- # compute the previous noisy sample x_t -> x_t-1
527
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
528
-
529
- # call the callback, if provided
530
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
531
- progress_bar.update()
532
- if callback is not None and i % callback_steps == 0:
533
- callback(i, t, latents)
534
-
535
- # 9. Post-processing
536
- image = self.decode_latents(latents)
537
-
538
- # 10. Run safety checker
539
- image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype)
540
-
541
- # 11. Convert to PIL
542
- if output_type == "pil":
543
- image = self.numpy_to_pil(image)
544
-
545
- if not return_dict:
546
- return (image, has_nsfw_concept)
547
-
548
- return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
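A minimal usage sketch for the pipeline above. The checkpoint id and file names are assumptions, not taken from this diff; any AltDiffusion checkpoint exposing the components registered in __init__ should work the same way:

    import PIL.Image
    from ppdiffusers import AltDiffusionImg2ImgPipeline

    # load a pretrained pipeline (checkpoint id is an assumption)
    pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion")

    # img2img: start from an existing image and push it toward the prompt
    init_image = PIL.Image.open("sketch.png").convert("RGB").resize((512, 512))
    out = pipe(prompt="a fantasy landscape, oil painting", image=init_image,
               strength=0.75, guidance_scale=7.5)
    out.images[0].save("fantasy_landscape.png")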
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/44ov41za8i/FreeVC/models.py DELETED
@@ -1,351 +0,0 @@
1
- import copy
2
- import math
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
-
7
- import commons
8
- import modules
9
-
10
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
12
- from commons import init_weights, get_padding
13
-
14
-
15
- class ResidualCouplingBlock(nn.Module):
16
- def __init__(self,
17
- channels,
18
- hidden_channels,
19
- kernel_size,
20
- dilation_rate,
21
- n_layers,
22
- n_flows=4,
23
- gin_channels=0):
24
- super().__init__()
25
- self.channels = channels
26
- self.hidden_channels = hidden_channels
27
- self.kernel_size = kernel_size
28
- self.dilation_rate = dilation_rate
29
- self.n_layers = n_layers
30
- self.n_flows = n_flows
31
- self.gin_channels = gin_channels
32
-
33
- self.flows = nn.ModuleList()
34
- for i in range(n_flows):
35
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
36
- self.flows.append(modules.Flip())
37
-
38
- def forward(self, x, x_mask, g=None, reverse=False):
39
- if not reverse:
40
- for flow in self.flows:
41
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
42
- else:
43
- for flow in reversed(self.flows):
44
- x = flow(x, x_mask, g=g, reverse=reverse)
45
- return x
46
-
47
-
48
- class Encoder(nn.Module):
49
- def __init__(self,
50
- in_channels,
51
- out_channels,
52
- hidden_channels,
53
- kernel_size,
54
- dilation_rate,
55
- n_layers,
56
- gin_channels=0):
57
- super().__init__()
58
- self.in_channels = in_channels
59
- self.out_channels = out_channels
60
- self.hidden_channels = hidden_channels
61
- self.kernel_size = kernel_size
62
- self.dilation_rate = dilation_rate
63
- self.n_layers = n_layers
64
- self.gin_channels = gin_channels
65
-
66
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
67
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
68
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
69
-
70
- def forward(self, x, x_lengths, g=None):
71
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
72
- x = self.pre(x) * x_mask
73
- x = self.enc(x, x_mask, g=g)
74
- stats = self.proj(x) * x_mask
75
- m, logs = torch.split(stats, self.out_channels, dim=1)
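- # reparameterization: sample z ~ N(m, exp(logs)^2), masked to the valid (unpadded) frames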
76
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
77
- return z, m, logs, x_mask
78
-
79
-
80
- class Generator(torch.nn.Module):
81
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
82
- super(Generator, self).__init__()
83
- self.num_kernels = len(resblock_kernel_sizes)
84
- self.num_upsamples = len(upsample_rates)
85
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
86
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
87
-
88
- self.ups = nn.ModuleList()
89
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
90
- self.ups.append(weight_norm(
91
- ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
92
- k, u, padding=(k-u)//2)))
93
-
94
- self.resblocks = nn.ModuleList()
95
- for i in range(len(self.ups)):
96
- ch = upsample_initial_channel//(2**(i+1))
97
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
98
- self.resblocks.append(resblock(ch, k, d))
99
-
100
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
101
- self.ups.apply(init_weights)
102
-
103
- if gin_channels != 0:
104
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
105
-
106
- def forward(self, x, g=None):
107
- x = self.conv_pre(x)
108
- if g is not None:
109
- x = x + self.cond(g)
110
-
111
- for i in range(self.num_upsamples):
112
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
113
- x = self.ups[i](x)
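- # multi-receptive field fusion (HiFi-GAN style): run parallel ResBlocks and average their outputs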
114
- xs = None
115
- for j in range(self.num_kernels):
116
- if xs is None:
117
- xs = self.resblocks[i*self.num_kernels+j](x)
118
- else:
119
- xs += self.resblocks[i*self.num_kernels+j](x)
120
- x = xs / self.num_kernels
121
- x = F.leaky_relu(x)
122
- x = self.conv_post(x)
123
- x = torch.tanh(x)
124
-
125
- return x
126
-
127
- def remove_weight_norm(self):
128
- print('Removing weight norm...')
129
- for l in self.ups:
130
- remove_weight_norm(l)
131
- for l in self.resblocks:
132
- l.remove_weight_norm()
133
-
134
-
135
- class DiscriminatorP(torch.nn.Module):
136
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
137
- super(DiscriminatorP, self).__init__()
138
- self.period = period
139
- self.use_spectral_norm = use_spectral_norm
140
- norm_f = spectral_norm if use_spectral_norm else weight_norm
141
- self.convs = nn.ModuleList([
142
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
143
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
144
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
145
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
146
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
147
- ])
148
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
149
-
150
- def forward(self, x):
151
- fmap = []
152
-
153
- # 1d to 2d
154
- b, c, t = x.shape
155
- if t % self.period != 0: # pad first
156
- n_pad = self.period - (t % self.period)
157
- x = F.pad(x, (0, n_pad), "reflect")
158
- t = t + n_pad
159
- x = x.view(b, c, t // self.period, self.period)
160
-
161
- for l in self.convs:
162
- x = l(x)
163
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
164
- fmap.append(x)
165
- x = self.conv_post(x)
166
- fmap.append(x)
167
- x = torch.flatten(x, 1, -1)
168
-
169
- return x, fmap
170
-
171
-
172
- class DiscriminatorS(torch.nn.Module):
173
- def __init__(self, use_spectral_norm=False):
174
- super(DiscriminatorS, self).__init__()
175
- norm_f = spectral_norm if use_spectral_norm else weight_norm
176
- self.convs = nn.ModuleList([
177
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
178
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
179
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
180
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
181
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
182
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
183
- ])
184
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
185
-
186
- def forward(self, x):
187
- fmap = []
188
-
189
- for l in self.convs:
190
- x = l(x)
191
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
192
- fmap.append(x)
193
- x = self.conv_post(x)
194
- fmap.append(x)
195
- x = torch.flatten(x, 1, -1)
196
-
197
- return x, fmap
198
-
199
-
200
- class MultiPeriodDiscriminator(torch.nn.Module):
201
- def __init__(self, use_spectral_norm=False):
202
- super(MultiPeriodDiscriminator, self).__init__()
203
- periods = [2,3,5,7,11]
204
-
205
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
206
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
207
- self.discriminators = nn.ModuleList(discs)
208
-
209
- def forward(self, y, y_hat):
210
- y_d_rs = []
211
- y_d_gs = []
212
- fmap_rs = []
213
- fmap_gs = []
214
- for i, d in enumerate(self.discriminators):
215
- y_d_r, fmap_r = d(y)
216
- y_d_g, fmap_g = d(y_hat)
217
- y_d_rs.append(y_d_r)
218
- y_d_gs.append(y_d_g)
219
- fmap_rs.append(fmap_r)
220
- fmap_gs.append(fmap_g)
221
-
222
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
223
-
224
-
225
- class SpeakerEncoder(torch.nn.Module):
226
- def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256):
227
- super(SpeakerEncoder, self).__init__()
228
- self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True)
229
- self.linear = nn.Linear(model_hidden_size, model_embedding_size)
230
- self.relu = nn.ReLU()
231
-
232
- def forward(self, mels):
233
- self.lstm.flatten_parameters()
234
- _, (hidden, _) = self.lstm(mels)
235
- embeds_raw = self.relu(self.linear(hidden[-1]))
236
- return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
237
-
238
- def compute_partial_slices(self, total_frames, partial_frames, partial_hop):
239
- mel_slices = []
240
- for i in range(0, total_frames-partial_frames, partial_hop):
241
- mel_range = torch.arange(i, i+partial_frames)
242
- mel_slices.append(mel_range)
243
-
244
- return mel_slices
245
-
246
- def embed_utterance(self, mel, partial_frames=128, partial_hop=64):
247
- mel_len = mel.size(1)
248
- last_mel = mel[:,-partial_frames:]
249
-
250
- if mel_len > partial_frames:
251
- mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop)
252
- mels = list(mel[:,s] for s in mel_slices)
253
- mels.append(last_mel)
254
- mels = torch.stack(tuple(mels), 0).squeeze(1)
255
-
256
- with torch.no_grad():
257
- partial_embeds = self(mels)
258
- embed = torch.mean(partial_embeds, dim=0).unsqueeze(0)
259
- #embed = embed / torch.linalg.norm(embed, 2)
260
- else:
261
- with torch.no_grad():
262
- embed = self(last_mel)
263
-
264
- return embed
265
-
266
-
267
- class SynthesizerTrn(nn.Module):
268
- """
269
- Synthesizer for Training
270
- """
271
-
272
- def __init__(self,
273
- spec_channels,
274
- segment_size,
275
- inter_channels,
276
- hidden_channels,
277
- filter_channels,
278
- n_heads,
279
- n_layers,
280
- kernel_size,
281
- p_dropout,
282
- resblock,
283
- resblock_kernel_sizes,
284
- resblock_dilation_sizes,
285
- upsample_rates,
286
- upsample_initial_channel,
287
- upsample_kernel_sizes,
288
- gin_channels,
289
- ssl_dim,
290
- use_spk,
291
- **kwargs):
292
-
293
- super().__init__()
294
- self.spec_channels = spec_channels
295
- self.inter_channels = inter_channels
296
- self.hidden_channels = hidden_channels
297
- self.filter_channels = filter_channels
298
- self.n_heads = n_heads
299
- self.n_layers = n_layers
300
- self.kernel_size = kernel_size
301
- self.p_dropout = p_dropout
302
- self.resblock = resblock
303
- self.resblock_kernel_sizes = resblock_kernel_sizes
304
- self.resblock_dilation_sizes = resblock_dilation_sizes
305
- self.upsample_rates = upsample_rates
306
- self.upsample_initial_channel = upsample_initial_channel
307
- self.upsample_kernel_sizes = upsample_kernel_sizes
308
- self.segment_size = segment_size
309
- self.gin_channels = gin_channels
310
- self.ssl_dim = ssl_dim
311
- self.use_spk = use_spk
312
-
313
- self.enc_p = Encoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16)
314
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
315
- self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
316
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
317
-
318
- if not self.use_spk:
319
- self.enc_spk = SpeakerEncoder(model_hidden_size=gin_channels, model_embedding_size=gin_channels)
320
-
321
- def forward(self, c, spec, g=None, mel=None, c_lengths=None, spec_lengths=None):
322
- if c_lengths == None:
323
- c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
324
- if spec_lengths == None:
325
- spec_lengths = (torch.ones(spec.size(0)) * spec.size(-1)).to(spec.device)
326
-
327
- if not self.use_spk:
328
- g = self.enc_spk(mel.transpose(1,2))
329
- g = g.unsqueeze(-1)
330
-
331
- _, m_p, logs_p, _ = self.enc_p(c, c_lengths)
332
- z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g)
333
- z_p = self.flow(z, spec_mask, g=g)
334
-
335
- z_slice, ids_slice = commons.rand_slice_segments(z, spec_lengths, self.segment_size)
336
- o = self.dec(z_slice, g=g)
337
-
338
- return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
339
-
340
- def infer(self, c, g=None, mel=None, c_lengths=None):
341
- if c_lengths == None:
342
- c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
343
- if not self.use_spk:
344
- g = self.enc_spk.embed_utterance(mel.transpose(1,2))
345
- g = g.unsqueeze(-1)
346
-
347
- z_p, m_p, logs_p, c_mask = self.enc_p(c, c_lengths)
348
- z = self.flow(z_p, c_mask, g=g, reverse=True)
349
- o = self.dec(z * c_mask, g=g)
350
-
351
- return o
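
Note: `embed_utterance` above averages speaker embeddings over overlapping 128-frame windows (hop 64). A minimal smoke-test sketch of the encoder on its own; the shapes are assumptions taken from the constructor defaults and from the `mel.transpose(1, 2)` call in `SynthesizerTrn`:

import torch

# Dummy utterance: 1 batch, 400 frames, 80 mel channels, laid out
# [batch, frames, mels] as expected by nn.LSTM(batch_first=True).
encoder = SpeakerEncoder()            # defaults: 80 mel channels -> 256-dim embedding
mel = torch.randn(1, 400, 80)

embed = encoder.embed_utterance(mel)  # averaged over 128-frame windows, hop 64
print(embed.shape)                    # -> torch.Size([1, 256])
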
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/partial_fc.py DELETED
@@ -1,222 +0,0 @@
-import logging
-import os
-
-import torch
-import torch.distributed as dist
-from torch.nn import Module
-from torch.nn.functional import normalize, linear
-from torch.nn.parameter import Parameter
-
-
-class PartialFC(Module):
-    """
-    Author: {Xiang An, Yang Xiao, XuHan Zhu} in DeepGlint,
-    Partial FC: Training 10 Million Identities on a Single Machine
-    See the original paper:
-    https://arxiv.org/abs/2010.05222
-    """
-
-    @torch.no_grad()
-    def __init__(self, rank, local_rank, world_size, batch_size, resume,
-                 margin_softmax, num_classes, sample_rate=1.0, embedding_size=512, prefix="./"):
-        """
-        rank: int
-            Unique process(GPU) ID from 0 to world_size - 1.
-        local_rank: int
-            Unique process(GPU) ID within the server, from 0 to 7.
-        world_size: int
-            Number of GPUs.
-        batch_size: int
-            Batch size on the current rank(GPU).
-        resume: bool
-            Select whether to restore the softmax weights.
-        margin_softmax: callable
-            A margin softmax function, e.g. cosface, arcface.
-        num_classes: int
-            The number of class centers stored on the current rank(CPU/GPU), usually total_classes // world_size,
-            required.
-        sample_rate: float
-            The partial fc sampling rate; when the number of classes grows beyond 2 million, sampling
-            can greatly speed up training and save a lot of GPU memory; default is 1.0.
-        embedding_size: int
-            The feature dimension, default is 512.
-        prefix: str
-            Path for saving checkpoints, default is './'.
-        """
-        super(PartialFC, self).__init__()
-        #
-        self.num_classes: int = num_classes
-        self.rank: int = rank
-        self.local_rank: int = local_rank
-        self.device: torch.device = torch.device("cuda:{}".format(self.local_rank))
-        self.world_size: int = world_size
-        self.batch_size: int = batch_size
-        self.margin_softmax: callable = margin_softmax
-        self.sample_rate: float = sample_rate
-        self.embedding_size: int = embedding_size
-        self.prefix: str = prefix
-        self.num_local: int = num_classes // world_size + int(rank < num_classes % world_size)
-        self.class_start: int = num_classes // world_size * rank + min(rank, num_classes % world_size)
-        self.num_sample: int = int(self.sample_rate * self.num_local)
-
-        self.weight_name = os.path.join(self.prefix, "rank_{}_softmax_weight.pt".format(self.rank))
-        self.weight_mom_name = os.path.join(self.prefix, "rank_{}_softmax_weight_mom.pt".format(self.rank))
-
-        if resume:
-            try:
-                self.weight: torch.Tensor = torch.load(self.weight_name)
-                self.weight_mom: torch.Tensor = torch.load(self.weight_mom_name)
-                if self.weight.shape[0] != self.num_local or self.weight_mom.shape[0] != self.num_local:
-                    raise IndexError
-                logging.info("softmax weight resume successfully!")
-                logging.info("softmax weight mom resume successfully!")
-            except (FileNotFoundError, KeyError, IndexError):
-                self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
-                self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
-                logging.info("softmax weight init!")
-                logging.info("softmax weight mom init!")
-        else:
-            self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
-            self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
-            logging.info("softmax weight init successfully!")
-            logging.info("softmax weight mom init successfully!")
-        self.stream: torch.cuda.Stream = torch.cuda.Stream(local_rank)
-
-        self.index = None
-        if int(self.sample_rate) == 1:
-            self.update = lambda: 0
-            self.sub_weight = Parameter(self.weight)
-            self.sub_weight_mom = self.weight_mom
-        else:
-            self.sub_weight = Parameter(torch.empty((0, 0)).cuda(local_rank))
-
-    def save_params(self):
-        """ Save the softmax weights for each rank under prefix.
-        """
-        torch.save(self.weight.data, self.weight_name)
-        torch.save(self.weight_mom, self.weight_mom_name)
-
-    @torch.no_grad()
-    def sample(self, total_label):
-        """
-        Sample all positive class centers on each rank, and randomly select negative class centers to fill a fixed
-        `num_sample`.
-
-        total_label: tensor
-            Label after all gather, which crosses all GPUs.
-        """
-        index_positive = (self.class_start <= total_label) & (total_label < self.class_start + self.num_local)
-        total_label[~index_positive] = -1
-        total_label[index_positive] -= self.class_start
-        if int(self.sample_rate) != 1:
-            positive = torch.unique(total_label[index_positive], sorted=True)
-            if self.num_sample - positive.size(0) >= 0:
-                perm = torch.rand(size=[self.num_local], device=self.device)
-                perm[positive] = 2.0
-                index = torch.topk(perm, k=self.num_sample)[1]
-                index = index.sort()[0]
-            else:
-                index = positive
-            self.index = index
-            total_label[index_positive] = torch.searchsorted(index, total_label[index_positive])
-            self.sub_weight = Parameter(self.weight[index])
-            self.sub_weight_mom = self.weight_mom[index]
-
-    def forward(self, total_features, norm_weight):
-        """ Partial fc forward, `logits = X * sample(W)`
-        """
-        torch.cuda.current_stream().wait_stream(self.stream)
-        logits = linear(total_features, norm_weight)
-        return logits
-
-    @torch.no_grad()
-    def update(self):
-        """ Write the updated weight and weight_mom back to the memory bank.
-        """
-        self.weight_mom[self.index] = self.sub_weight_mom
-        self.weight[self.index] = self.sub_weight
-
-    def prepare(self, label, optimizer):
-        """
-        Get the sampled class centers for calculating softmax.
-
-        label: tensor
-            Label tensor on each rank.
-        optimizer: opt
-            Optimizer for partial fc, which needs access to the weight momentum.
-        """
-        with torch.cuda.stream(self.stream):
-            total_label = torch.zeros(
-                size=[self.batch_size * self.world_size], device=self.device, dtype=torch.long)
-            dist.all_gather(list(total_label.chunk(self.world_size, dim=0)), label)
-            self.sample(total_label)
-            optimizer.state.pop(optimizer.param_groups[-1]['params'][0], None)
-            optimizer.param_groups[-1]['params'][0] = self.sub_weight
-            optimizer.state[self.sub_weight]['momentum_buffer'] = self.sub_weight_mom
-            norm_weight = normalize(self.sub_weight)
-            return total_label, norm_weight
-
-    def forward_backward(self, label, features, optimizer):
-        """
-        Partial fc forward and backward with model parallelism.
-
-        label: tensor
-            Label tensor on each rank(GPU)
-        features: tensor
-            Features tensor on each rank(GPU)
-        optimizer: optimizer
-            Optimizer for partial fc
-
-        Returns:
-        --------
-        x_grad: tensor
-            The gradient of features.
-        loss_v: tensor
-            Loss value of the cross entropy.
-        """
-        total_label, norm_weight = self.prepare(label, optimizer)
-        total_features = torch.zeros(
-            size=[self.batch_size * self.world_size, self.embedding_size], device=self.device)
-        dist.all_gather(list(total_features.chunk(self.world_size, dim=0)), features.data)
-        total_features.requires_grad = True
-
-        logits = self.forward(total_features, norm_weight)
-        logits = self.margin_softmax(logits, total_label)
-
-        with torch.no_grad():
-            max_fc = torch.max(logits, dim=1, keepdim=True)[0]
-            dist.all_reduce(max_fc, dist.ReduceOp.MAX)
-
-            # calculate exp(logits) and all-reduce
-            logits_exp = torch.exp(logits - max_fc)
-            logits_sum_exp = logits_exp.sum(dim=1, keepdims=True)
-            dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM)
-
-            # calculate prob
-            logits_exp.div_(logits_sum_exp)
-
-            # get one-hot
-            grad = logits_exp
-            index = torch.where(total_label != -1)[0]
-            one_hot = torch.zeros(size=[index.size()[0], grad.size()[1]], device=grad.device)
-            one_hot.scatter_(1, total_label[index, None], 1)
-
-            # calculate loss
-            loss = torch.zeros(grad.size()[0], 1, device=grad.device)
-            loss[index] = grad[index].gather(1, total_label[index, None])
-            dist.all_reduce(loss, dist.ReduceOp.SUM)
-            loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1)
-
-            # calculate grad
-            grad[index] -= one_hot
-            grad.div_(self.batch_size * self.world_size)
-
-        logits.backward(grad)
-        if total_features.grad is not None:
-            total_features.grad.detach_()
-        x_grad: torch.Tensor = torch.zeros_like(features, requires_grad=True)
-        # feature gradient all-reduce
-        dist.reduce_scatter(x_grad, list(total_features.grad.chunk(self.world_size, dim=0)))
-        x_grad = x_grad * self.world_size
-        # backward backbone
-        return x_grad, loss_v
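
Note: the heart of `sample()` above is to always keep the class centers that actually appear in the batch and pad with random negatives up to a fixed budget. A single-process sketch of that selection logic, outside any distributed context (the sizes are invented for illustration):

import torch

num_local, num_sample = 1000, 100
labels = torch.tensor([3, 42, 977, 42])           # classes present in this batch

positive = torch.unique(labels, sorted=True)      # centers we must keep
perm = torch.rand(num_local)                      # random score per center
perm[positive] = 2.0                              # guarantee positives win topk
index = torch.topk(perm, k=num_sample)[1].sort()[0]

# Remap each label to its position inside the sampled subset,
# exactly as sample() does with torch.searchsorted.
remapped = torch.searchsorted(index, labels)
assert torch.equal(index[remapped], labels)       # positives survived sampling
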
spaces/AIatUIUC/CodeLATS/lats/lats.py DELETED
@@ -1,233 +0,0 @@
-from utils import enumerate_resume, make_printv, write_jsonl, resume_success_count
-from executors import executor_factory
-from generators import generator_factory, model_factory
-from typing import Any, Dict, List, Tuple
-import math
-import sys
-import random
-
-sys.set_int_max_str_digits(100000)  # Increase the limit to 100000 digits
-
-react_prompt_header = "Here are some previous solutions and the corresponding test results.\n"
-react_prompt_starter = "\n\nYour solution:\n"
-extra_header = "\n\nName the function answer()"
-
-class Node:
-    def __init__(self, solution: str, parent=None, context="", depth=0):
-        self.solution = solution
-        self.parent = parent
-        self.children = []
-        self.value = 0
-        self.visits = 0
-        self.context = context
-        self.depth = depth
-        self.reflection = ""
-        self.test_feedback = ""
-
-    def uct(self, exploration_weight=1.0):
-        if self.visits == 0:
-            # return float('inf')
-            return self.value
-        return (self.value / self.visits) + exploration_weight * math.sqrt(math.log(self.parent.visits) / self.visits)
-
-    def best_child(self):
-        if not self.children:  # Check if the children list is empty
-            return None
-        return max(self.children, key=lambda child: child.uct())
-
-    def best_child_value(self):
-        if not self.children:  # Check if the children list is empty
-            return None
-        return max(self.children, key=lambda child: child.value)
-
-    def update(self, reward: float):
-        self.visits += 1
-        self.value += reward
-
-
-def prune_context_blocks(context: str, max_length: int) -> str:
-    """Prune the context to fit within the specified max_length by removing entire blocks of content, using 'Previous Trial' as the delimiter."""
-    if len(context) <= max_length:
-        return context
-
-    # Split by the block delimiter "Previous Trial".
-    blocks = context.split('Previous Trial')
-
-    # Remove the earliest blocks until the context fits within max_length.
-    while len('Previous Trial'.join(blocks)) > max_length and blocks:
-        blocks.pop(0)
-
-    return 'Previous Trial'.join(blocks)
-
-def gather_context_from_tree(node: Node) -> Tuple[List[str], List[str]]:
-    """
-    Given a node, walk up its tree and gather the feedback and reflections
-    from each parent node until the root is reached.
-
-    Args:
-        node (Node): The node to start gathering context from.
-
-    Returns:
-        Tuple[List[str], List[str]]: Two lists containing the accumulated feedback and reflections.
-    """
-    accumulated_feedback = []
-    accumulated_reflection = []
-    num_nodes = 0
-
-    while node and num_nodes < 2:
-        num_nodes += 1
-        if node.test_feedback:
-            accumulated_feedback.append(node.test_feedback)
-        if node.reflection:
-            accumulated_reflection.append(node.reflection)
-        node = node.parent
-
-    # Reverse the lists so that the context from the earliest nodes comes first
-    return accumulated_feedback[::-1], accumulated_reflection[::-1]
-
-def sample_n_random(items: List[str], n: int) -> List[str]:
-    """Sample min(n, len(items)) random items from a list"""
-    assert n >= 0
-    if n >= len(items):
-        return items
-    return random.sample(items, n)
-
-def run_lats(
-    model_name: str,
-    language: str,
-    max_iters: int,
-    verbose: bool,
-    instruction: str = "Write some code to print Hello World in Python",
-    n_samples: int = 3,
-    depth: int = 5,
-) -> None:
-    exe = executor_factory(language)
-    gen = generator_factory(language)
-    model = model_factory(model_name)
-
-    num_success = 0  # Counter for successful solutions
-    cur_func_impl = None
-
-    item = {}
-
-    tests = gen.internal_tests(instruction + extra_header, model, 1)
-    tests_i = sample_n_random(tests, 1)
-
-    while cur_func_impl is None:
-        cur_func_impl = gen.func_impl(instruction + extra_header, model, "simple")
-    root = Node(cur_func_impl)  # initial solution (for pass@1 metric)
-
-    # Lists for logging
-    reflections = []
-    implementations = []
-    test_feedback = []
-    is_solved = False
-
-    # first attempt
-    implementations.append(cur_func_impl)
-    assert isinstance(cur_func_impl, str)
-    is_passing, feedback, _ = exe.execute(cur_func_impl, tests_i)
-    test_feedback.append(feedback)
-
-    # if solved, exit early
-    if is_passing:
-        num_success += 1
-        return cur_func_impl  # GET SOLUTION
-
-    reflection = gen.self_reflection(cur_func_impl, feedback, model)
-    reflections += [reflection]
-    root.test_feedback = feedback
-    root.reflection = reflection
-    max_iters = int(max_iters)
-    for cur_iter in range(max_iters):
-        # Selection
-        tests_i = sample_n_random(tests, 1)
-
-        node = root
-        trajectory = {
-            'solutions': [],
-            'feedbacks': []
-        }
-
-        while node.children:
-            node = node.best_child()
-            trajectory['solutions'].append(node.solution)
-
-        # Expansion
-        for _ in range(n_samples):
-            new_solution = None
-            strategy = "mcts"
-            prev_func_impl = node.solution
-            feedback = node.test_feedback
-            reflection = node.reflection
-            acc_feedback, acc_reflection = gather_context_from_tree(node)
-
-            while new_solution is None:
-                new_solution = gen.func_impl(
-                    func_sig=instruction + extra_header,
-                    model=model,
-                    strategy=strategy,
-                    prev_func_impl=prev_func_impl,
-                    feedback=feedback,
-                    self_reflection=reflection,
-                    acc_feedback=acc_feedback,
-                    acc_reflection=acc_reflection
-                )
-
-            combined_context = "\nPrevious Trial\n\n" + new_solution
-
-            child = Node(new_solution, parent=node, context=combined_context, depth=node.depth + 1)
-            node.children.append(child)
-
-        # Simulation
-        reward_real = 0
-        for child in node.children:
-            is_passing_internal, feedback_internal, _ = exe.execute(child.solution, tests_i)
-            if not is_passing_internal:
-                reflection = gen.self_reflection(child.solution, feedback_internal, model)
-                reflections.append(reflection)
-                child.reflection = reflection
-                child.test_feedback = feedback_internal
-                child.context += "\n\nPrevious Trial\n\n" + child.solution + "\n\nTest results: \n" + feedback_internal + "\n\nSelf-reflection: " + reflection
-            else:
-                child.context += "\n\nPrevious Trial\n\n" + child.solution + "\n\nTest results: \n" + feedback_internal
-                child.reflection = ""
-                child.test_feedback = feedback_internal
-
-            if "Tested passed:" in feedback_internal:
-                # Split at "Tests failed:" and take the part before it (which contains the passed tests)
-                passed_section = feedback_internal.split("Tests failed:")[0]
-                # Split at "Tested passed:" and take the part after it, then count the non-empty lines
-                reward_internal = len([line for line in passed_section.split("Tested passed:")[1].splitlines() if line.strip() != ''])
-                reward_internal = reward_internal / len(tests_i)
-            else:
-                reward_internal = 0
-            if is_passing_internal or cur_iter == max_iters - 1:
-                item["solution"] = child.solution
-                break
-
-        if is_solved:
-            break
-
-        reward = reward_internal + reward_real
-        child.update(reward)
-
-        # Backpropagation
-        temp = child
-        while temp.parent:
-            temp = temp.parent
-            temp.update(reward)
-
-    # Choose the best solution after all iterations
-    if is_solved:
-        best_solution = item["solution"]
-    else:
-        best_solution = root.best_child_value().solution
-        item["solution"] = best_solution
-
-    return best_solution
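
Note: `Node.uct` above implements the standard UCB1 selection rule, value/visits + c * sqrt(ln(parent visits) / visits). A tiny sketch of how that trades exploitation against exploration during the selection walk (the numbers are invented):

import math

def uct(value, visits, parent_visits, c=1.0):
    return value / visits + c * math.sqrt(math.log(parent_visits) / visits)

# Child A: high average reward, heavily visited.
# Child B: lower average reward, barely visited.
print(uct(value=4.0, visits=8, parent_visits=10))  # ~1.037
print(uct(value=0.5, visits=1, parent_visits=10))  # ~2.017 -> explored first
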
spaces/AONYLMR/White-box-Cartoonization/app.py DELETED
@@ -1,108 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-import argparse
-import functools
-import os
-import pathlib
-import sys
-from typing import Callable
-import uuid
-
-import gradio as gr
-import huggingface_hub
-import numpy as np
-import PIL.Image
-
-from io import BytesIO
-from wbc.cartoonize import Cartoonize
-
-ORIGINAL_REPO_URL = 'https://github.com/SystemErrorWang/White-box-Cartoonization'
-TITLE = 'SystemErrorWang/White-box-Cartoonization'
-DESCRIPTION = f"""This is a demo for {ORIGINAL_REPO_URL}.
-
-"""
-ARTICLE = """
-
-"""
-
-SAFEHASH = [x for x in "0123456789-abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
-def compress_UUID():
-    '''
-    Following http://www.ietf.org/rfc/rfc1738.txt, encode a UUID over an enlarged
-    character set to generate a short string.
-    Alphabet: [0-9a-zA-Z\-_], 64 characters in total
-    Length: (32-2)/3*2 = 20
-    Note: unique enough for everyone on Earth to use without a repeat for 100 years (2^120)
-    :return: String
-    '''
-    row = str(uuid.uuid4()).replace('-', '')
-    safe_code = ''
-    for i in range(10):
-        enbin = "%012d" % int(bin(int(row[i * 3] + row[i * 3 + 1] + row[i * 3 + 2], 16))[2:], 10)
-        safe_code += (SAFEHASH[int(enbin[0:6], 2)] + SAFEHASH[int(enbin[6:12], 2)])
-    safe_code = safe_code.replace('-', '')
-    return safe_code
-
-
-def parse_args() -> argparse.Namespace:
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--device', type=str, default='cpu')
-    parser.add_argument('--theme', type=str)
-    parser.add_argument('--live', action='store_true')
-    parser.add_argument('--share', action='store_true')
-    parser.add_argument('--port', type=int)
-    parser.add_argument('--disable-queue',
-                        dest='enable_queue',
-                        action='store_false')
-    parser.add_argument('--allow-flagging', type=str, default='never')
-    parser.add_argument('--allow-screenshot', action='store_true')
-    return parser.parse_args()
-
-def run(
-    image,
-    cartoonize: Cartoonize
-) -> tuple[PIL.Image.Image]:
-
-    out_path = compress_UUID() + '.png'
-    cartoonize.run_sigle(image.name, out_path)
-
-    return PIL.Image.open(out_path)
-
-
-def main():
-    gr.close_all()
-
-    args = parse_args()
-
-    cartoonize = Cartoonize(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'wbc/saved_models/'))
-
-    func = functools.partial(run, cartoonize=cartoonize)
-    func = functools.update_wrapper(func, run)
-
-    gr.Interface(
-        func,
-        [
-            gr.inputs.Image(type='file', label='Input Image'),
-        ],
-        [
-            gr.outputs.Image(
-                type='pil',
-                label='Result'),
-        ],
-        # examples=examples,
-        theme=args.theme,
-        title=TITLE,
-        description=DESCRIPTION,
-        article=ARTICLE,
-        allow_screenshot=args.allow_screenshot,
-        allow_flagging=args.allow_flagging,
-        live=args.live,
-    ).launch(
-        enable_queue=args.enable_queue,
-        server_port=args.port,
-        share=args.share,
-    )
-
-
-if __name__ == '__main__':
-    main()
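
Note: `compress_UUID` packs a 30-hex-digit slice of a UUID4 into roughly 20 URL-safe characters, which the app uses for collision-resistant output filenames. A minimal usage sketch (the trailing replace strips any '-' characters, so the result can occasionally be shorter than 20):

# Each call yields a fresh URL-safe ID, e.g. for output paths.
out_path = compress_UUID() + '.png'
print(out_path)  # e.g. 'aB3xK9_mQ2Lr7ZfT01cD.png'
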
spaces/Adapter/CoAdapter/ldm/util.py DELETED
@@ -1,200 +0,0 @@
-import importlib
-import math
-
-import cv2
-import torch
-import numpy as np
-
-import os
-from safetensors.torch import load_file
-
-from inspect import isfunction
-from PIL import Image, ImageDraw, ImageFont
-
-
-def log_txt_as_img(wh, xc, size=10):
-    # wh a tuple of (width, height)
-    # xc a list of captions to plot
-    b = len(xc)
-    txts = list()
-    for bi in range(b):
-        txt = Image.new("RGB", wh, color="white")
-        draw = ImageDraw.Draw(txt)
-        font = ImageFont.truetype('assets/DejaVuSans.ttf', size=size)
-        nc = int(40 * (wh[0] / 256))
-        lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))
-
-        try:
-            draw.text((0, 0), lines, fill="black", font=font)
-        except UnicodeEncodeError:
-            print("Can't encode string for logging. Skipping.")
-
-        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
-        txts.append(txt)
-    txts = np.stack(txts)
-    txts = torch.tensor(txts)
-    return txts
-
-
-def ismap(x):
-    if not isinstance(x, torch.Tensor):
-        return False
-    return (len(x.shape) == 4) and (x.shape[1] > 3)
-
-
-def isimage(x):
-    if not isinstance(x, torch.Tensor):
-        return False
-    return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)
-
-
-def exists(x):
-    return x is not None
-
-
-def default(val, d):
-    if exists(val):
-        return val
-    return d() if isfunction(d) else d
-
-
-def mean_flat(tensor):
-    """
-    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
-    Take the mean over all non-batch dimensions.
-    """
-    return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def count_params(model, verbose=False):
-    total_params = sum(p.numel() for p in model.parameters())
-    if verbose:
-        print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.")
-    return total_params
-
-
-def instantiate_from_config(config):
-    if "target" not in config:
-        if config == '__is_first_stage__':
-            return None
-        elif config == "__is_unconditional__":
-            return None
-        raise KeyError("Expected key `target` to instantiate.")
-    return get_obj_from_str(config["target"])(**config.get("params", dict()))
-
-
-def get_obj_from_str(string, reload=False):
-    module, cls = string.rsplit(".", 1)
-    if reload:
-        module_imp = importlib.import_module(module)
-        importlib.reload(module_imp)
-    return getattr(importlib.import_module(module, package=None), cls)
-
-
-checkpoint_dict_replacements = {
-    'cond_stage_model.transformer.text_model.embeddings.': 'cond_stage_model.transformer.embeddings.',
-    'cond_stage_model.transformer.text_model.encoder.': 'cond_stage_model.transformer.encoder.',
-    'cond_stage_model.transformer.text_model.final_layer_norm.': 'cond_stage_model.transformer.final_layer_norm.',
-}
-
-
-def transform_checkpoint_dict_key(k):
-    for text, replacement in checkpoint_dict_replacements.items():
-        if k.startswith(text):
-            k = replacement + k[len(text):]
-
-    return k
-
-
-def get_state_dict_from_checkpoint(pl_sd):
-    pl_sd = pl_sd.pop("state_dict", pl_sd)
-    pl_sd.pop("state_dict", None)
-
-    sd = {}
-    for k, v in pl_sd.items():
-        new_key = transform_checkpoint_dict_key(k)
-
-        if new_key is not None:
-            sd[new_key] = v
-
-    pl_sd.clear()
-    pl_sd.update(sd)
-
-    return pl_sd
-
-
-def read_state_dict(checkpoint_file, print_global_state=False):
-    _, extension = os.path.splitext(checkpoint_file)
-    if extension.lower() == ".safetensors":
-        pl_sd = load_file(checkpoint_file, device='cpu')
-    else:
-        pl_sd = torch.load(checkpoint_file, map_location='cpu')
-
-    if print_global_state and "global_step" in pl_sd:
-        print(f"Global Step: {pl_sd['global_step']}")
-
-    sd = get_state_dict_from_checkpoint(pl_sd)
-    return sd
-
-
-def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
-    print(f"Loading model from {ckpt}")
-    sd = read_state_dict(ckpt)
-    model = instantiate_from_config(config.model)
-    m, u = model.load_state_dict(sd, strict=False)
-    if len(m) > 0 and verbose:
-        print("missing keys:")
-        print(m)
-    if len(u) > 0 and verbose:
-        print("unexpected keys:")
-        print(u)
-
-    if 'anything' in ckpt.lower() and vae_ckpt is None:
-        vae_ckpt = 'models/anything-v4.0.vae.pt'
-
-    if vae_ckpt is not None and vae_ckpt != 'None':
-        print(f"Loading vae model from {vae_ckpt}")
-        vae_sd = torch.load(vae_ckpt, map_location="cpu")
-        if "global_step" in vae_sd:
-            print(f"Global Step: {vae_sd['global_step']}")
-        sd = vae_sd["state_dict"]
-        m, u = model.first_stage_model.load_state_dict(sd, strict=False)
-        if len(m) > 0 and verbose:
-            print("missing keys:")
-            print(m)
-        if len(u) > 0 and verbose:
-            print("unexpected keys:")
-            print(u)
-
-    model.cuda()
-    model.eval()
-    return model
-
-
-def resize_numpy_image(image, max_resolution=512 * 512, resize_short_edge=None):
-    h, w = image.shape[:2]
-    if resize_short_edge is not None:
-        k = resize_short_edge / min(h, w)
-    else:
-        k = max_resolution / (h * w)
-        k = k**0.5
-    h = int(np.round(h * k / 64)) * 64
-    w = int(np.round(w * k / 64)) * 64
-    image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LANCZOS4)
-    return image
-
-
-# make uc and prompt shapes match via padding for long prompts
-null_cond = None
-
-def fix_cond_shapes(model, prompt_condition, uc):
-    if uc is None:
-        return prompt_condition, uc
-    global null_cond
-    if null_cond is None:
-        null_cond = model.get_learned_conditioning([""])
-    while prompt_condition.shape[1] > uc.shape[1]:
-        uc = torch.cat((uc, null_cond.repeat((uc.shape[0], 1, 1))), axis=1)
-    while prompt_condition.shape[1] < uc.shape[1]:
-        prompt_condition = torch.cat((prompt_condition, null_cond.repeat((prompt_condition.shape[0], 1, 1))), axis=1)
-    return prompt_condition, uc
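
Note: `instantiate_from_config` is the usual latent-diffusion pattern: a config node names a class under `target` and its constructor kwargs under `params`. A sketch of the mechanism; `torch.nn.Linear` stands in here for a real model target:

import torch

config = {
    "target": "torch.nn.Linear",
    "params": {"in_features": 8, "out_features": 2},
}
# get_obj_from_str resolves the dotted path, then the class is called
# with the params dict as keyword arguments.
layer = instantiate_from_config(config)  # -> torch.nn.Linear(8, 2)
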
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/texttranslation.js DELETED
@@ -1,2 +0,0 @@
-import TextTranslation from './behaviors/texttranslation/TextTranslation.js';
-export default TextTranslation;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/ColorInputBase.js DELETED
@@ -1,145 +0,0 @@
-import Sizer from '../../sizer/Sizer.js';
-import CreateSwatch from './methods/CreateSwatch.js';
-import CreateInputText from '../../utils/build/CreateInputText.js';
-import ColorStringToInteger from '../../../../plugins/utils/color/ColorStringToInteger.js';
-import GetHexColorString from '../../../../plugins/utils/color/GetHexColorString.js';
-import SetSwatchColor from './methods/SetSwatchColor.js';
-import ResizeGameObject from '../../../../plugins/utils/size/ResizeGameObject.js';
-
-const GetValue = Phaser.Utils.Objects.GetValue;
-const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
-const Clamp = Phaser.Math.Clamp;
-
-class ColorInput extends Sizer {
-    constructor(scene, config) {
-        if (config === undefined) {
-            config = {};
-        }
-        config.orientation = 0;
-        super(scene, config);
-        this.type = 'rexColorInputLite';
-
-        // Add elements
-        var background = GetValue(config, 'background', undefined);
-
-        var swatchConfig = GetValue(config, 'swatch');
-        var swatchSize;
-        if (IsPlainObject(swatchConfig)) {
-            swatchSize = GetValue(swatchConfig, 'size');
-        }
-        var swatch = CreateSwatch(scene, GetValue(config, 'swatch'));
-
-        var inputTextConfig = GetValue(config, 'inputText', true);
-        var inputText;
-        if (inputTextConfig) {
-            inputText = CreateInputText(scene, inputTextConfig);
-        }
-
-        if (background) {
-            this.addBackground(background);
-        }
-
-        if (swatch) {
-            swatchSize = GetValue(config, 'swatchSize', swatchSize);
-            var squareExpandSwatch;
-            if (swatchSize !== undefined) {
-                ResizeGameObject(swatch, swatchSize, swatchSize);
-                squareExpandSwatch = false;
-            } else {
-                squareExpandSwatch = GetValue(config, 'squareExpandSwatch', true);
-            }
-
-            var fitRatio = (squareExpandSwatch) ? 1 : 0;
-            this.add(
-                swatch,
-                { proportion: 0, expand: false, fitRatio: fitRatio }
-            );
-        }
-
-        if (inputText) {
-            var proportion = (GetValue(inputTextConfig, 'width') === undefined) ? 1 : 0;
-            var expand = (GetValue(inputTextConfig, 'height') === undefined) ? true : false;
-            this.add(
-                inputText,
-                { proportion: proportion, expand: expand }
-            );
-        }
-
-        this.addChildrenMap('background', background);
-        this.addChildrenMap('swatch', swatch);
-        this.addChildrenMap('inputText', inputText);
-
-        if (inputText) {
-            inputText.on('close', function () {
-                this.setValue(inputText.value);
-            }, this);
-        }
-
-        var callback = GetValue(config, 'valuechangeCallback', null);
-        if (callback !== null) {
-            var scope = GetValue(config, 'valuechangeCallbackScope', undefined);
-            this.on('valuechange', callback, scope);
-        }
-
-        this.setValue(GetValue(config, 'value', 0x0));
-    }
-
-    get value() {
-        return this._value;
-    }
-
-    set value(value) {
-        if (typeof (value) === 'string') {
-            value = ColorStringToInteger(value);
-            if (value == null) {
-                var inputText = this.childrenMap.inputText;
-                if (inputText) {
-                    inputText.setText(GetHexColorString(this._value));
-                }
-                return;
-            }
-        } else {
-            value = Clamp(Math.floor(value), 0, 0xffffff);
-        }
-
-        if (this._value === value) {
-            return;
-        }
-
-        this._value = value;
-
-        var swatch = this.childrenMap.swatch;
-        if (swatch) {
-            SetSwatchColor(swatch, value);
-        }
-
-        var inputText = this.childrenMap.inputText;
-        if (inputText) {
-            inputText.setText(GetHexColorString(value));
-        }
-
-        this.emit('valuechange', this._value);
-    }
-
-    setValue(value) {
-        this.value = value;
-        return this;
-    }
-
-    get color() {
-        return this._value;
-    }
-
-    set color(color) {
-        this.value = color;
-    }
-
-    setColor(color) {
-        this.color = color;
-        return this;
-    }
-
-}
-
-export default ColorInput;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/tabpages/Factory.js DELETED
@@ -1,13 +0,0 @@
-import TabPages from './TabPages.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('tabPages', function (config) {
-    var gameObject = new TabPages(this.scene, config);
-    this.scene.add.existing(gameObject);
-    return gameObject;
-});
-
-SetValue(window, 'RexPlugins.UI.TabPages', TabPages);
-
-export default TabPages;
spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/replicate.py DELETED
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-# File   : replicate.py
-# Author : Jiayuan Mao
-# Email  : [email protected]
-# Date   : 27/01/2018
-#
-# This file is part of Synchronized-BatchNorm-PyTorch.
-# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
-# Distributed under MIT License.
-
-import functools
-
-from torch.nn.parallel.data_parallel import DataParallel
-
-__all__ = [
-    'CallbackContext',
-    'execute_replication_callbacks',
-    'DataParallelWithCallback',
-    'patch_replication_callback'
-]
-
-
-class CallbackContext(object):
-    pass
-
-
-def execute_replication_callbacks(modules):
-    """
-    Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
-
-    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
-
-    Note that, as all modules are isomorphic, we assign each sub-module a context
-    (shared among multiple copies of this module on different devices).
-    Through this context, different copies can share some information.
-
-    We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
-    of any slave copies.
-    """
-    master_copy = modules[0]
-    nr_modules = len(list(master_copy.modules()))
-    ctxs = [CallbackContext() for _ in range(nr_modules)]
-
-    for i, module in enumerate(modules):
-        for j, m in enumerate(module.modules()):
-            if hasattr(m, '__data_parallel_replicate__'):
-                m.__data_parallel_replicate__(ctxs[j], i)
-
-
-class DataParallelWithCallback(DataParallel):
-    """
-    Data Parallel with a replication callback.
-
-    A replication callback `__data_parallel_replicate__` of each module will be invoked after being created by
-    the original `replicate` function.
-    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
-
-    Examples:
-        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
-        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
-        # sync_bn.__data_parallel_replicate__ will be invoked.
-    """
-
-    def replicate(self, module, device_ids):
-        modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
-        execute_replication_callbacks(modules)
-        return modules
-
-
-def patch_replication_callback(data_parallel):
-    """
-    Monkey-patch an existing `DataParallel` object to add the replication callback.
-    Useful when you have a customized `DataParallel` implementation.
-
-    Examples:
-        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
-        > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
-        > patch_replication_callback(sync_bn)
-        # this is equivalent to
-        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
-        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
-    """
-
-    assert isinstance(data_parallel, DataParallel)
-
-    old_replicate = data_parallel.replicate
-
-    @functools.wraps(old_replicate)
-    def new_replicate(module, device_ids):
-        modules = old_replicate(module, device_ids)
-        execute_replication_callbacks(modules)
-        return modules
-
-    data_parallel.replicate = new_replicate
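
Note: the callback contract above is simply a method named `__data_parallel_replicate__(ctx, copy_id)` on any sub-module. A minimal sketch of a module opting in (the module itself is invented for illustration):

import torch.nn as nn

class SyncCounter(nn.Module):
    """Invented example of a module using the replication callback protocol."""
    def __data_parallel_replicate__(self, ctx, copy_id):
        # ctx is shared across all device copies of this sub-module;
        # copy_id == 0 marks the master copy, which is called back first.
        ctx.master = (copy_id == 0)
        self.copy_id = copy_id
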
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/torch2onnx.py DELETED
@@ -1,59 +0,0 @@
-import numpy as np
-import onnx
-import torch
-
-
-def convert_onnx(net, path_module, output, opset=11, simplify=False):
-    assert isinstance(net, torch.nn.Module)
-    img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
-    img = img.astype(np.float32)
-    img = (img / 255. - 0.5) / 0.5  # torch style norm
-    img = img.transpose((2, 0, 1))
-    img = torch.from_numpy(img).unsqueeze(0).float()
-
-    weight = torch.load(path_module)
-    net.load_state_dict(weight)
-    net.eval()
-    torch.onnx.export(net, img, output, keep_initializers_as_inputs=False, verbose=False, opset_version=opset)
-    model = onnx.load(output)
-    graph = model.graph
-    graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
-    if simplify:
-        from onnxsim import simplify
-        model, check = simplify(model)
-        assert check, "Simplified ONNX model could not be validated"
-    onnx.save(model, output)
-
-
-if __name__ == '__main__':
-    import os
-    import argparse
-    from backbones import get_model
-
-    parser = argparse.ArgumentParser(description='ArcFace PyTorch to onnx')
-    parser.add_argument('input', type=str, help='input backbone.pth file or path')
-    parser.add_argument('--output', type=str, default=None, help='output onnx path')
-    parser.add_argument('--network', type=str, default=None, help='backbone network')
-    parser.add_argument('--simplify', type=bool, default=False, help='onnx simplify')
-    args = parser.parse_args()
-    input_file = args.input
-    if os.path.isdir(input_file):
-        input_file = os.path.join(input_file, "backbone.pth")
-    assert os.path.exists(input_file)
-    model_name = os.path.basename(os.path.dirname(input_file)).lower()
-    params = model_name.split("_")
-    if len(params) >= 3 and params[1] in ('arcface', 'cosface'):
-        if args.network is None:
-            args.network = params[2]
-    assert args.network is not None
-    print(args)
-    backbone_onnx = get_model(args.network, dropout=0)
-
-    output_path = args.output
-    if output_path is None:
-        output_path = os.path.join(os.path.dirname(__file__), 'onnx')
-    if not os.path.exists(output_path):
-        os.makedirs(output_path)
-    assert os.path.isdir(output_path)
-    output_file = os.path.join(output_path, "%s.onnx" % model_name)
-    convert_onnx(backbone_onnx, input_file, output_file, simplify=args.simplify)
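
Note: a hedged example of calling `convert_onnx` directly, mirroring the `__main__` block above; the "r50" backbone name and the checkpoint path are assumptions, not values confirmed by this repo:

from backbones import get_model

# Assumed backbone name and checkpoint path; adjust to your training run.
net = get_model("r50", dropout=0)
convert_onnx(net, "ms1mv3_arcface_r50/backbone.pth",
             "onnx/ms1mv3_arcface_r50.onnx", opset=11, simplify=False)
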
spaces/Alpaca233/SadTalker/src/utils/text2speech.py DELETED
@@ -1,20 +0,0 @@
-import os
-import tempfile
-from TTS.api import TTS
-
-
-class TTSTalker():
-    def __init__(self) -> None:
-        model_name = TTS.list_models()[0]
-        self.tts = TTS(model_name)
-
-    def test(self, text, language='en'):
-
-        tempf = tempfile.NamedTemporaryFile(
-            delete=False,
-            suffix=('.' + 'wav'),
-        )
-
-        self.tts.tts_to_file(text, speaker=self.tts.speakers[0], language=language, file_path=tempf.name)
-
-        return tempf.name
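
Note: despite the name, `test()` is the synthesis entry point here; it writes a wav to a temp file and returns its path. A minimal usage sketch:

# Synthesize speech and hand the resulting wav path to downstream code.
talker = TTSTalker()          # loads the first model returned by TTS.list_models()
wav_path = talker.test("Hello from SadTalker", language='en')
print(wav_path)               # e.g. a path like /tmp/tmpab12cd34.wav
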
spaces/Ameaou/academic-chatgpt3.1/theme.py DELETED
@@ -1,231 +0,0 @@
-import gradio as gr
-from toolbox import get_conf
-CODE_HIGHLIGHT, = get_conf('CODE_HIGHLIGHT')
-# Color palettes available in gradio:
-# gr.themes.utils.colors.slate
-# gr.themes.utils.colors.gray
-# gr.themes.utils.colors.zinc
-# gr.themes.utils.colors.neutral
-# gr.themes.utils.colors.stone
-# gr.themes.utils.colors.red
-# gr.themes.utils.colors.orange
-# gr.themes.utils.colors.amber
-# gr.themes.utils.colors.yellow
-# gr.themes.utils.colors.lime
-# gr.themes.utils.colors.green
-# gr.themes.utils.colors.emerald
-# gr.themes.utils.colors.teal
-# gr.themes.utils.colors.cyan
-# gr.themes.utils.colors.sky
-# gr.themes.utils.colors.blue
-# gr.themes.utils.colors.indigo
-# gr.themes.utils.colors.violet
-# gr.themes.utils.colors.purple
-# gr.themes.utils.colors.fuchsia
-# gr.themes.utils.colors.pink
-# gr.themes.utils.colors.rose
-
-
-def adjust_theme():
-    try:
-        color_er = gr.themes.utils.colors.fuchsia
-        set_theme = gr.themes.Default(
-            primary_hue=gr.themes.utils.colors.orange,
-            neutral_hue=gr.themes.utils.colors.gray,
-            font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
-                  "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
-            font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
-        set_theme.set(
-            # Colors
-            input_background_fill_dark="*neutral_800",
-            # Transition
-            button_transition="none",
-            # Shadows
-            button_shadow="*shadow_drop",
-            button_shadow_hover="*shadow_drop_lg",
-            button_shadow_active="*shadow_inset",
-            input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
-            input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
-            input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
-            checkbox_label_shadow="*shadow_drop",
-            block_shadow="*shadow_drop",
-            form_gap_width="1px",
-            # Button borders
-            input_border_width="1px",
-            input_background_fill="white",
-            # Gradients
-            stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
-            stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
-            error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
-            error_background_fill_dark="*background_fill_primary",
-            checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
-            checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
-            checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
-            checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
-            button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
-            button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
-            button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
-            button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
-            button_primary_border_color_dark="*primary_500",
-            button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
-            button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
-            button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
-            button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
-            button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
-            button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
-            button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
-            button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
-            button_cancel_border_color=color_er.c200,
-            button_cancel_border_color_dark=color_er.c600,
-            button_cancel_text_color=color_er.c600,
-            button_cancel_text_color_dark="white",
-        )
-    except Exception:
-        set_theme = None
-        print('The installed gradio version is old; custom fonts and colors are not supported')
-    return set_theme
-
-
-advanced_css = """
-/* Set the table's outer margin to 1em, collapse borders between inner cells, show empty cells. */
-.markdown-body table {
-    margin: 1em 0;
-    border-collapse: collapse;
-    empty-cells: show;
-}
-
-/* Set table cell padding to 5px, border width to 1.2px, color to --border-color-primary. */
-.markdown-body th, .markdown-body td {
-    border: 1.2px solid var(--border-color-primary);
-    padding: 5px;
-}
-
-/* Set the table header background to rgba(175,184,193,0.2), i.e. 0.2 opacity. */
-.markdown-body thead {
-    background-color: rgba(175,184,193,0.2);
-}
-
-/* Set table header cell padding to .5em and .2em. */
-.markdown-body thead th {
-    padding: .5em .2em;
-}
-
-/* Remove the default spacing of list prefixes so they align with the text. */
-.markdown-body ol, .markdown-body ul {
-    padding-inline-start: 2em !important;
-}
-
-/* Style the chat bubbles: rounded corners, max width, shadows, etc. */
-[class *= "message"] {
-    border-radius: var(--radius-xl) !important;
-    /* padding: var(--spacing-xl) !important; */
-    /* font-size: var(--text-md) !important; */
-    /* line-height: var(--line-md) !important; */
-    /* min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
-    /* min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
-}
-[data-testid = "bot"] {
-    max-width: 95%;
-    /* width: auto !important; */
-    border-bottom-left-radius: 0 !important;
-}
-[data-testid = "user"] {
-    max-width: 100%;
-    /* width: auto !important; */
-    border-bottom-right-radius: 0 !important;
-}
-
-/* Give inline code a light gray background, with rounded corners and spacing. */
-.markdown-body code {
-    display: inline;
-    white-space: break-spaces;
-    border-radius: 6px;
-    margin: 0 2px 0 2px;
-    padding: .2em .4em .1em .4em;
-    background-color: rgba(175,184,193,0.2);
-}
-/* Style code blocks: background color, padding, margins, rounded corners. */
-.markdown-body pre code {
-    display: block;
-    overflow: auto;
-    white-space: pre;
-    background-color: rgba(175,184,193,0.2);
-    border-radius: 10px;
-    padding: 1em;
-    margin: 1em 2em 1em 0.5em;
-}
-
-"""
-
-if CODE_HIGHLIGHT:
-    advanced_css += """
-
-.hll { background-color: #ffffcc }
-.c { color: #3D7B7B; font-style: italic } /* Comment */
-.err { border: 1px solid #FF0000 } /* Error */
-.k { color: hsl(197, 94%, 51%); font-weight: bold } /* Keyword */
-.o { color: #666666 } /* Operator */
-.ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
-.cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
-.cp { color: #9C6500 } /* Comment.Preproc */
-.cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
-.c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
-.cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
-.gd { color: #A00000 } /* Generic.Deleted */
-.ge { font-style: italic } /* Generic.Emph */
-.gr { color: #E40000 } /* Generic.Error */
-.gh { color: #000080; font-weight: bold } /* Generic.Heading */
-.gi { color: #008400 } /* Generic.Inserted */
-.go { color: #717171 } /* Generic.Output */
-.gp { color: #000080; font-weight: bold } /* Generic.Prompt */
-.gs { font-weight: bold } /* Generic.Strong */
-.gu { color: #800080; font-weight: bold } /* Generic.Subheading */
-.gt { color: #a9dd00 } /* Generic.Traceback */
-.kc { color: #008000; font-weight: bold } /* Keyword.Constant */
-.kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
-.kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
-.kp { color: #008000 } /* Keyword.Pseudo */
-.kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
-.kt { color: #B00040 } /* Keyword.Type */
-.m { color: #666666 } /* Literal.Number */
-.s { color: #BA2121 } /* Literal.String */
-.na { color: #687822 } /* Name.Attribute */
-.nb { color: #e5f8c3 } /* Name.Builtin */
-.nc { color: #ffad65; font-weight: bold } /* Name.Class */
-.no { color: #880000 } /* Name.Constant */
-.nd { color: #AA22FF } /* Name.Decorator */
-.ni { color: #717171; font-weight: bold } /* Name.Entity */
-.ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
-.nf { color: #f9f978 } /* Name.Function */
-.nl { color: #767600 } /* Name.Label */
-.nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
-.nt { color: #008000; font-weight: bold } /* Name.Tag */
-.nv { color: #19177C } /* Name.Variable */
-.ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
-.w { color: #bbbbbb } /* Text.Whitespace */
-.mb { color: #666666 } /* Literal.Number.Bin */
-.mf { color: #666666 } /* Literal.Number.Float */
-.mh { color: #666666 } /* Literal.Number.Hex */
-.mi { color: #666666 } /* Literal.Number.Integer */
-.mo { color: #666666 } /* Literal.Number.Oct */
-.sa { color: #BA2121 } /* Literal.String.Affix */
-.sb { color: #BA2121 } /* Literal.String.Backtick */
-.sc { color: #BA2121 } /* Literal.String.Char */
-.dl { color: #BA2121 } /* Literal.String.Delimiter */
-.sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
-.s2 { color: #2bf840 } /* Literal.String.Double */
-.se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
-.sh { color: #BA2121 } /* Literal.String.Heredoc */
-.si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
-.sx { color: #008000 } /* Literal.String.Other */
-.sr { color: #A45A77 } /* Literal.String.Regex */
-.s1 { color: #BA2121 } /* Literal.String.Single */
-.ss { color: #19177C } /* Literal.String.Symbol */
-.bp { color: #008000 } /* Name.Builtin.Pseudo */
-.fm { color: #0000FF } /* Name.Function.Magic */
-.vc { color: #19177C } /* Name.Variable.Class */
-.vg { color: #19177C } /* Name.Variable.Global */
-.vi { color: #19177C } /* Name.Variable.Instance */
-.vm { color: #19177C } /* Name.Variable.Magic */
-.il { color: #666666 } /* Literal.Number.Integer.Long */
-"""
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/__init__.py DELETED
File without changes
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.py DELETED
@@ -1,315 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- import os
- import numpy as np
- import torch
- import warnings
-
- from .. import custom_ops
- from .. import misc
- from . import upfirdn2d
- from . import bias_act
-
- # ----------------------------------------------------------------------------
-
- _plugin = None
-
-
- def _init():
-     global _plugin
-     if _plugin is None:
-
-         # sources=['filtered_lrelu.h', 'filtered_lrelu.cu', 'filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu']
-         # sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
-         # try:
-         #     _plugin = custom_ops.get_plugin('filtered_lrelu_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math', '--allow-unsupported-compiler'])
-         # except:
-         #     warnings.warn('Failed to build CUDA kernels for filtered_lrelu_plugin. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
-
-         _plugin = custom_ops.get_plugin_v3(
-             module_name='filtered_lrelu_plugin',
-             sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu',
-                      'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'],
-             headers=['filtered_lrelu.h', 'filtered_lrelu.cu'],
-             source_dir=os.path.dirname(__file__),
-             extra_cuda_cflags=['--use_fast_math',
-                                '--allow-unsupported-compiler'],
-         )
-     return True
-
-
- def _get_filter_size(f):
-     if f is None:
-         return 1, 1
-     assert isinstance(f, torch.Tensor)
-     assert 1 <= f.ndim <= 2
-     return f.shape[-1], f.shape[0]  # width, height
-
-
- def _parse_padding(padding):
-     if isinstance(padding, int):
-         padding = [padding, padding]
-     assert isinstance(padding, (list, tuple))
-     assert all(isinstance(x, (int, np.integer)) for x in padding)
-     padding = [int(x) for x in padding]
-     if len(padding) == 2:
-         px, py = padding
-         padding = [px, px, py, py]
-     px0, px1, py0, py1 = padding
-     return px0, px1, py0, py1
-
- # ----------------------------------------------------------------------------
-
-
- def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'):
-     r"""Filtered leaky ReLU for a batch of 2D images.
-
-     Performs the following sequence of operations for each channel:
-
-     1. Add channel-specific bias if provided (`b`).
-
-     2. Upsample the image by inserting N-1 zeros after each pixel (`up`).
-
-     3. Pad the image with the specified number of zeros on each side (`padding`).
-        Negative padding corresponds to cropping the image.
-
-     4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it
-        so that the footprint of all output pixels lies within the input image.
-
-     5. Multiply each value by the provided gain factor (`gain`).
-
-     6. Apply leaky ReLU activation function to each value.
-
-     7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided.
-
-     8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking
-        it so that the footprint of all output pixels lies within the input image.
-
-     9. Downsample the image by keeping every Nth pixel (`down`).
-
-     The fused op is considerably more efficient than performing the same calculation
-     using standard PyTorch ops. It supports gradients of arbitrary order.
-
-     Args:
-         x:           Float32/float16/float64 input tensor of the shape
-                      `[batch_size, num_channels, in_height, in_width]`.
-         fu:          Float32 upsampling FIR filter of the shape
-                      `[filter_height, filter_width]` (non-separable),
-                      `[filter_taps]` (separable), or
-                      `None` (identity).
-         fd:          Float32 downsampling FIR filter of the shape
-                      `[filter_height, filter_width]` (non-separable),
-                      `[filter_taps]` (separable), or
-                      `None` (identity).
-         b:           Bias vector, or `None` to disable. Must be a 1D tensor of the same type
-                      as `x`. The length of the vector must match the channel dimension of `x`.
-         up:          Integer upsampling factor (default: 1).
-         down:        Integer downsampling factor (default: 1).
-         padding:     Padding with respect to the upsampled image. Can be a single number
-                      or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
-                      (default: 0).
-         gain:        Overall scaling factor for signal magnitude (default: sqrt(2)).
-         slope:       Slope on the negative side of leaky ReLU (default: 0.2).
-         clamp:       Maximum magnitude for leaky ReLU output (default: None).
-         flip_filter: False = convolution, True = correlation (default: False).
-         impl:        Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
-     Returns:
-         Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
-     """
-     assert isinstance(x, torch.Tensor)
-     assert impl in ['ref', 'cuda']
-     if impl == 'cuda' and x.device.type == 'cuda' and _init():
-         return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0)
-     return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)
-
- # ----------------------------------------------------------------------------
-
-
- @misc.profiled_function
- def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
-     """Slow and memory-inefficient reference implementation of `filtered_lrelu()` using
-     existing `upfirdn2d()` and `bias_act()` ops.
-     """
-     assert isinstance(x, torch.Tensor) and x.ndim == 4
-     fu_w, fu_h = _get_filter_size(fu)
-     fd_w, fd_h = _get_filter_size(fd)
-     if b is not None:
-         assert isinstance(b, torch.Tensor) and b.dtype == x.dtype
-         misc.assert_shape(b, [x.shape[1]])
-     assert isinstance(up, int) and up >= 1
-     assert isinstance(down, int) and down >= 1
-     px0, px1, py0, py1 = _parse_padding(padding)
-     assert gain == float(gain) and gain > 0
-     assert slope == float(slope) and slope >= 0
-     assert clamp is None or (clamp == float(clamp) and clamp >= 0)
-
-     # Calculate output size.
-     batch_size, channels, in_h, in_w = x.shape
-     in_dtype = x.dtype
-     out_w = (in_w * up + (px0 + px1) - (fu_w - 1) -
-              (fd_w - 1) + (down - 1)) // down
-     out_h = (in_h * up + (py0 + py1) - (fu_h - 1) -
-              (fd_h - 1) + (down - 1)) // down
-
-     # Compute using existing ops.
-     x = bias_act.bias_act(x=x, b=b)  # Apply bias.
-     # Upsample.
-     x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[
-                             px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter)
-     # Leaky ReLU, gain, clamp.
-     x = bias_act.bias_act(x=x, act='lrelu', alpha=slope,
-                           gain=gain, clamp=clamp)
-     # Downsample.
-     x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter)
-
-     # Check output shape & dtype.
-     misc.assert_shape(x, [batch_size, channels, out_h, out_w])
-     assert x.dtype == in_dtype
-     return x
-
- # ----------------------------------------------------------------------------
-
-
- _filtered_lrelu_cuda_cache = dict()
-
-
- def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
-     """Fast CUDA implementation of `filtered_lrelu()` using custom ops.
-     """
-     assert isinstance(up, int) and up >= 1
-     assert isinstance(down, int) and down >= 1
-     px0, px1, py0, py1 = _parse_padding(padding)
-     assert gain == float(gain) and gain > 0
-     gain = float(gain)
-     assert slope == float(slope) and slope >= 0
-     slope = float(slope)
-     assert clamp is None or (clamp == float(clamp) and clamp >= 0)
-     clamp = float(clamp if clamp is not None else 'inf')
-
-     # Look up from cache.
-     key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter)
-     if key in _filtered_lrelu_cuda_cache:
-         return _filtered_lrelu_cuda_cache[key]
-
-     # Forward op.
-     class FilteredLReluCuda(torch.autograd.Function):
-         @staticmethod
-         def forward(ctx, x, fu, fd, b, si, sx, sy):  # pylint: disable=arguments-differ
-             assert isinstance(x, torch.Tensor) and x.ndim == 4
-
-             # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable).
-             if fu is None:
-                 fu = torch.ones([1, 1], dtype=torch.float32, device=x.device)
-             if fd is None:
-                 fd = torch.ones([1, 1], dtype=torch.float32, device=x.device)
-             assert 1 <= fu.ndim <= 2
-             assert 1 <= fd.ndim <= 2
-
-             # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1.
-             if up == 1 and fu.ndim == 1 and fu.shape[0] == 1:
-                 fu = fu.square()[None]
-             if down == 1 and fd.ndim == 1 and fd.shape[0] == 1:
-                 fd = fd.square()[None]
-
-             # Missing sign input tensor.
-             if si is None:
-                 si = torch.empty([0])
-
-             # Missing bias tensor.
-             if b is None:
-                 b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device)
-
-             # Construct internal sign tensor only if gradients are needed.
-             write_signs = (si.numel() == 0) and (
-                 x.requires_grad or b.requires_grad)
-
-             # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout.
-             strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1]
-             if any(a < b for a, b in zip(strides[:-1], strides[1:])):
-                 warnings.warn(
-                     "low-performance memory layout detected in filtered_lrelu input", RuntimeWarning)
-
-             # Call C++/CUDA plugin if datatype is supported.
-             if x.dtype in [torch.float16, torch.float32]:
-                 if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device):
-                     warnings.warn(
-                         "filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning)
-                 y, so, return_code = _plugin.filtered_lrelu(
-                     x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs)
-             else:
-                 return_code = -1
-
-             # No CUDA kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because
-             # only the bit-packed sign tensor is retained for gradient computation.
-             if return_code < 0:
-                 warnings.warn(
-                     "filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning)
-
-                 y = x.add(b.unsqueeze(-1).unsqueeze(-1))  # Add bias.
-                 # Upsample.
-                 y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[
-                                         px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter)
-                 # Activation function and sign handling. Modifies y in-place.
-                 so = _plugin.filtered_lrelu_act_(
-                     y, si, sx, sy, gain, slope, clamp, write_signs)
-                 # Downsample.
-                 y = upfirdn2d.upfirdn2d(
-                     x=y, f=fd, down=down, flip_filter=flip_filter)
-
-             # Prepare for gradient computation.
-             ctx.save_for_backward(fu, fd, (si if si.numel() else so))
-             ctx.x_shape = x.shape
-             ctx.y_shape = y.shape
-             ctx.s_ofs = sx, sy
-             return y
-
-         @staticmethod
-         def backward(ctx, dy):  # pylint: disable=arguments-differ
-             fu, fd, si = ctx.saved_tensors
-             _, _, xh, xw = ctx.x_shape
-             _, _, yh, yw = ctx.y_shape
-             sx, sy = ctx.s_ofs
-             dx = None   # 0
-             dfu = None
-             assert not ctx.needs_input_grad[1]
-             dfd = None
-             assert not ctx.needs_input_grad[2]
-             db = None   # 3
-             dsi = None
-             assert not ctx.needs_input_grad[4]
-             dsx = None
-             assert not ctx.needs_input_grad[5]
-             dsy = None
-             assert not ctx.needs_input_grad[6]
-
-             if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]:
-                 pp = [
-                     (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0,
-                     xw * up - yw * down + px0 - (up - 1),
-                     (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0,
-                     xh * up - yh * down + py0 - (up - 1),
-                 ]
-                 gg = gain * (up ** 2) / (down ** 2)
-                 ff = (not flip_filter)
-                 sx = sx - (fu.shape[-1] - 1) + px0
-                 sy = sy - (fu.shape[0] - 1) + py0
-                 dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope,
-                                           clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy)
-
-             if ctx.needs_input_grad[3]:
-                 db = dx.sum([0, 2, 3])
-
-             return dx, dfu, dfd, db, dsi, dsx, dsy
-
-     # Add to cache.
-     _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda
-     return FilteredLReluCuda
-
- # ----------------------------------------------------------------------------
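For orientation, the nine-step pipeline documented in `filtered_lrelu()` above can be approximated with stock PyTorch ops. The sketch below is an illustration only, not the fused kernel: it assumes a non-separable float32 filter and symmetric positive padding, and it skips the bias and sign-tensor bookkeeping. All names in it are invented for the example.

import torch
import torch.nn.functional as F

def filtered_lrelu_sketch(x, fu, fd, up=2, down=2, pad=1,
                          gain=2 ** 0.5, slope=0.2, clamp=None):
    n, c, h, w = x.shape
    # 1. Upsample by inserting up-1 zeros after each pixel; the up**2 gain
    #    compensates for the energy lost to zero insertion.
    y = torch.zeros(n, c, h * up, w * up, dtype=x.dtype, device=x.device)
    y[:, :, ::up, ::up] = x * (up ** 2)
    # 2. Pad, then convolve each channel with the upsampling FIR filter
    #    (F.conv2d is correlation, so flipping the kernel gives convolution).
    y = F.pad(y, [pad] * 4)
    ku = fu.flip([0, 1])[None, None].repeat(c, 1, 1, 1).to(y.dtype)
    y = F.conv2d(y, ku, groups=c)
    # 3. Scaled leaky ReLU, optionally clamped.
    y = F.leaky_relu(y, slope) * gain
    if clamp is not None:
        y = y.clamp(-clamp, clamp)
    # 4. Convolve with the downsampling FIR filter, then keep every Nth pixel.
    kd = fd.flip([0, 1])[None, None].repeat(c, 1, 1, 1).to(y.dtype)
    y = F.conv2d(y, kd, groups=c)
    return y[:, :, ::down, ::down]

x = torch.randn(1, 3, 8, 8)
f = torch.ones(2, 2) / 4  # box filter as a stand-in for a proper FIR design
print(filtered_lrelu_sketch(x, f, f).shape)  # torch.Size([1, 3, 8, 8])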
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_heun_discrete.py DELETED
@@ -1,426 +0,0 @@
- # Copyright 2023 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import math
- from collections import defaultdict
- from typing import List, Optional, Tuple, Union
-
- import numpy as np
- import torch
-
- from ..configuration_utils import ConfigMixin, register_to_config
- from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
-
-
- # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
- def betas_for_alpha_bar(
-     num_diffusion_timesteps,
-     max_beta=0.999,
-     alpha_transform_type="cosine",
- ):
-     """
-     Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
-     (1-beta) over time from t = [0,1].
-
-     Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
-     to that part of the diffusion process.
-
-     Args:
-         num_diffusion_timesteps (`int`): the number of betas to produce.
-         max_beta (`float`): the maximum beta to use; use values lower than 1 to
-             prevent singularities.
-         alpha_transform_type (`str`, *optional*, defaults to `cosine`): the type of noise schedule for alpha_bar.
-             Choose from `cosine` or `exp`.
-
-     Returns:
-         betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
-     """
-     if alpha_transform_type == "cosine":
-
-         def alpha_bar_fn(t):
-             return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
-
-     elif alpha_transform_type == "exp":
-
-         def alpha_bar_fn(t):
-             return math.exp(t * -12.0)
-
-     else:
-         raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
-
-     betas = []
-     for i in range(num_diffusion_timesteps):
-         t1 = i / num_diffusion_timesteps
-         t2 = (i + 1) / num_diffusion_timesteps
-         betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
-     return torch.tensor(betas, dtype=torch.float32)
-
-
- class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
-     """
-     Implements Algorithm 2 (Heun steps) from Karras et al. (2022) for discrete beta schedules. Based on the original
-     k-diffusion implementation by Katherine Crowson:
-     https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L90
-
-     [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
-     function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
-     [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
-     [`~SchedulerMixin.from_pretrained`] functions.
-
-     Args:
-         num_train_timesteps (`int`): number of diffusion steps used to train the model.
-         beta_start (`float`): the starting `beta` value of inference.
-         beta_end (`float`): the final `beta` value.
-         beta_schedule (`str`):
-             the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
-             `linear` or `scaled_linear`.
-         trained_betas (`np.ndarray`, optional):
-             option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
-         prediction_type (`str`, default `epsilon`, optional):
-             prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
-             process), `sample` (directly predicting the noisy sample) or `v_prediction` (see section 2.4 of
-             https://imagen.research.google/video/paper.pdf).
-         clip_sample (`bool`, default `True`):
-             option to clip predicted sample for numerical stability.
-         clip_sample_range (`float`, default `1.0`):
-             the maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
-         use_karras_sigmas (`bool`, *optional*, defaults to `False`):
-             This parameter controls whether to use Karras sigmas (Karras et al. (2022) scheme) for step sizes in the
-             noise schedule during the sampling process. If True, the sigmas will be determined according to a sequence
-             of noise levels {σi} as defined in Equation (5) of the paper https://arxiv.org/pdf/2206.00364.pdf.
-         timestep_spacing (`str`, default `"linspace"`):
-             The way the timesteps should be scaled. Refer to Table 2 of [Common Diffusion Noise Schedules and Sample
-             Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information.
-         steps_offset (`int`, default `0`):
-             an offset added to the inference steps. You can use a combination of `offset=1` and
-             `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product, as done in
-             stable diffusion.
-     """
-
-     _compatibles = [e.name for e in KarrasDiffusionSchedulers]
-     order = 2
-
-     @register_to_config
-     def __init__(
-         self,
-         num_train_timesteps: int = 1000,
-         beta_start: float = 0.00085,  # sensible defaults
-         beta_end: float = 0.012,
-         beta_schedule: str = "linear",
-         trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
-         prediction_type: str = "epsilon",
-         use_karras_sigmas: Optional[bool] = False,
-         clip_sample: Optional[bool] = False,
-         clip_sample_range: float = 1.0,
-         timestep_spacing: str = "linspace",
-         steps_offset: int = 0,
-     ):
-         if trained_betas is not None:
-             self.betas = torch.tensor(trained_betas, dtype=torch.float32)
-         elif beta_schedule == "linear":
-             self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
-         elif beta_schedule == "scaled_linear":
-             # this schedule is very specific to the latent diffusion model.
-             self.betas = (
-                 torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
-             )
-         elif beta_schedule == "squaredcos_cap_v2":
-             # Glide cosine schedule
-             self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
-         elif beta_schedule == "exp":
-             self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
-         else:
-             raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
-
-         self.alphas = 1.0 - self.betas
-         self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
-
-         # set all values
-         self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
-         self.use_karras_sigmas = use_karras_sigmas
-
-     def index_for_timestep(self, timestep, schedule_timesteps=None):
-         if schedule_timesteps is None:
-             schedule_timesteps = self.timesteps
-
-         indices = (schedule_timesteps == timestep).nonzero()
-
-         # The sigma index that is taken for the **very** first `step`
-         # is always the second index (or the last index if there is only 1).
-         # This way we can ensure we don't accidentally skip a sigma in
-         # case we start in the middle of the denoising schedule (e.g. for image-to-image).
-         if len(self._index_counter) == 0:
-             pos = 1 if len(indices) > 1 else 0
-         else:
-             timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
-             pos = self._index_counter[timestep_int]
-
-         return indices[pos].item()
-
-     @property
-     def init_noise_sigma(self):
-         # standard deviation of the initial noise distribution
-         if self.config.timestep_spacing in ["linspace", "trailing"]:
-             return self.sigmas.max()
-
-         return (self.sigmas.max() ** 2 + 1) ** 0.5
-
-     def scale_model_input(
-         self,
-         sample: torch.FloatTensor,
-         timestep: Union[float, torch.FloatTensor],
-     ) -> torch.FloatTensor:
-         """
-         Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
-         current timestep.
-
-         Args:
-             sample (`torch.FloatTensor`): input sample
-             timestep (`int`, optional): current timestep
-
-         Returns:
-             `torch.FloatTensor`: scaled input sample
-         """
-         step_index = self.index_for_timestep(timestep)
-
-         sigma = self.sigmas[step_index]
-         sample = sample / ((sigma**2 + 1) ** 0.5)
-         return sample
-
-     def set_timesteps(
-         self,
-         num_inference_steps: int,
-         device: Union[str, torch.device] = None,
-         num_train_timesteps: Optional[int] = None,
-     ):
-         """
-         Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
-
-         Args:
-             num_inference_steps (`int`):
-                 the number of diffusion steps used when generating samples with a pre-trained model.
-             device (`str` or `torch.device`, optional):
-                 the device to which the timesteps should be moved. If `None`, the timesteps are not moved.
-         """
-         self.num_inference_steps = num_inference_steps
-
-         num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
-
-         # "linspace", "leading", "trailing" corresponds to annotation of Table 2 of https://arxiv.org/abs/2305.08891
-         if self.config.timestep_spacing == "linspace":
-             timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
-         elif self.config.timestep_spacing == "leading":
-             step_ratio = num_train_timesteps // self.num_inference_steps
-             # creates integer timesteps by multiplying by ratio
-             # casting to int to avoid issues when num_inference_steps is a power of 3
-             timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
-             timesteps += self.config.steps_offset
-         elif self.config.timestep_spacing == "trailing":
-             step_ratio = num_train_timesteps / self.num_inference_steps
-             # creates integer timesteps by multiplying by ratio
-             # casting to int to avoid issues when num_inference_steps is a power of 3
-             timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
-             timesteps -= 1
-         else:
-             raise ValueError(
-                 f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
-             )
-
-         sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
-         log_sigmas = np.log(sigmas)
-         sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
-
-         if self.config.use_karras_sigmas:
-             sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
-             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
-
-         sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
-         sigmas = torch.from_numpy(sigmas).to(device=device)
-         self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
-
-         timesteps = torch.from_numpy(timesteps)
-         timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
-
-         if str(device).startswith("mps"):
-             # mps does not support float64
-             self.timesteps = timesteps.to(device, dtype=torch.float32)
-         else:
-             self.timesteps = timesteps.to(device=device)
-
-         # empty dt and derivative
-         self.prev_derivative = None
-         self.dt = None
-
-         # for exp beta schedules, such as the one for `pipeline_shap_e.py`,
-         # we need an index counter
-         self._index_counter = defaultdict(int)
-
-     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
-     def _sigma_to_t(self, sigma, log_sigmas):
-         # get log sigma
-         log_sigma = np.log(sigma)
-
-         # get distribution
-         dists = log_sigma - log_sigmas[:, np.newaxis]
-
-         # get sigmas range
-         low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
-         high_idx = low_idx + 1
-
-         low = log_sigmas[low_idx]
-         high = log_sigmas[high_idx]
-
-         # interpolate sigmas
-         w = (low - log_sigma) / (low - high)
-         w = np.clip(w, 0, 1)
-
-         # transform interpolation to time range
-         t = (1 - w) * low_idx + w * high_idx
-         t = t.reshape(sigma.shape)
-         return t
-
-     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
-     def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
-         """Constructs the noise schedule of Karras et al. (2022)."""
-
-         sigma_min: float = in_sigmas[-1].item()
-         sigma_max: float = in_sigmas[0].item()
-
-         rho = 7.0  # 7.0 is the value used in the paper
-         ramp = np.linspace(0, 1, num_inference_steps)
-         min_inv_rho = sigma_min ** (1 / rho)
-         max_inv_rho = sigma_max ** (1 / rho)
-         sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
-         return sigmas
-
-     @property
-     def state_in_first_order(self):
-         return self.dt is None
-
-     def step(
-         self,
-         model_output: Union[torch.FloatTensor, np.ndarray],
-         timestep: Union[float, torch.FloatTensor],
-         sample: Union[torch.FloatTensor, np.ndarray],
-         return_dict: bool = True,
-     ) -> Union[SchedulerOutput, Tuple]:
-         """
-         Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
-         process from the learned model outputs (most often the predicted noise).
-
-         Args:
-             model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model.
-             timestep (`int`): current discrete timestep in the diffusion chain.
-             sample (`torch.FloatTensor` or `np.ndarray`): current instance of sample being created by diffusion process.
-             return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
-
-         Returns:
-             [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
-             [`~schedulers.scheduling_utils.SchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
-             returning a tuple, the first element is the sample tensor.
-         """
-         step_index = self.index_for_timestep(timestep)
-
-         # advance index counter by 1
-         timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
-         self._index_counter[timestep_int] += 1
-
-         if self.state_in_first_order:
-             sigma = self.sigmas[step_index]
-             sigma_next = self.sigmas[step_index + 1]
-         else:
-             # 2nd order / Heun's method
-             sigma = self.sigmas[step_index - 1]
-             sigma_next = self.sigmas[step_index]
-
-         # currently only gamma=0 is supported. This usually works best anyways.
-         # We can support gamma in the future but then need to scale the timestep before
-         # passing it to the model which requires a change in API
-         gamma = 0
-         sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
-
-         # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
-         if self.config.prediction_type == "epsilon":
-             sigma_input = sigma_hat if self.state_in_first_order else sigma_next
-             pred_original_sample = sample - sigma_input * model_output
-         elif self.config.prediction_type == "v_prediction":
-             sigma_input = sigma_hat if self.state_in_first_order else sigma_next
-             pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
-                 sample / (sigma_input**2 + 1)
-             )
-         elif self.config.prediction_type == "sample":
-             pred_original_sample = model_output
-         else:
-             raise ValueError(
-                 f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
-             )
-
-         if self.config.clip_sample:
-             pred_original_sample = pred_original_sample.clamp(
-                 -self.config.clip_sample_range, self.config.clip_sample_range
-             )
-
-         if self.state_in_first_order:
-             # 2. Convert to an ODE derivative for 1st order
-             derivative = (sample - pred_original_sample) / sigma_hat
-             # 3. delta timestep
-             dt = sigma_next - sigma_hat
-
-             # store for 2nd order step
-             self.prev_derivative = derivative
-             self.dt = dt
-             self.sample = sample
-         else:
-             # 2. 2nd order / Heun's method
-             derivative = (sample - pred_original_sample) / sigma_next
-             derivative = (self.prev_derivative + derivative) / 2
-
-             # 3. take prev timestep & sample
-             dt = self.dt
-             sample = self.sample
-
-             # free dt and derivative
-             # Note, this puts the scheduler in "first order mode"
-             self.prev_derivative = None
-             self.dt = None
-             self.sample = None
-
-         prev_sample = sample + derivative * dt
-
-         if not return_dict:
-             return (prev_sample,)
-
-         return SchedulerOutput(prev_sample=prev_sample)
-
-     def add_noise(
-         self,
-         original_samples: torch.FloatTensor,
-         noise: torch.FloatTensor,
-         timesteps: torch.FloatTensor,
-     ) -> torch.FloatTensor:
-         # Make sure sigmas and timesteps have the same device and dtype as original_samples
-         sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
-         if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
-             # mps does not support float64
-             schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
-             timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
-         else:
-             schedule_timesteps = self.timesteps.to(original_samples.device)
-             timesteps = timesteps.to(original_samples.device)
-
-         step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
-
-         sigma = sigmas[step_indices].flatten()
-         while len(sigma.shape) < len(original_samples.shape):
-             sigma = sigma.unsqueeze(-1)
-
-         noisy_samples = original_samples + noise * sigma
-         return noisy_samples
-
-     def __len__(self):
-         return self.config.num_train_timesteps
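As a usage note, the sketch below shows how this scheduler is typically driven through the public diffusers API; the `fake_unet` stub is a placeholder for a real noise-prediction model and is the only invented piece. Because Heun is a second-order method (`order = 2`), `scheduler.timesteps` contains repeated entries, so the model is evaluated twice per effective step.

import torch
from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler(beta_start=0.00085, beta_end=0.012,
                                  beta_schedule="scaled_linear")
scheduler.set_timesteps(num_inference_steps=10)

# Start from pure noise scaled by the schedule's initial sigma.
sample = torch.randn(1, 4, 16, 16) * scheduler.init_noise_sigma

def fake_unet(x, t):
    return torch.zeros_like(x)  # stub: pretends the predicted noise is zero

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = fake_unet(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample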
spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py DELETED
@@ -1,9 +0,0 @@
- _base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py'
- model = dict(
-     pretrained='open-mmlab://msra/hrnetv2_w18',
-     backbone=dict(
-         extra=dict(
-             stage2=dict(num_channels=(18, 36)),
-             stage3=dict(num_channels=(18, 36, 72)),
-             stage4=dict(num_channels=(18, 36, 72, 144)))),
-     neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py DELETED
@@ -1,52 +0,0 @@
- _base_ = [
-     '../_base_/models/retinanet_r50_fpn.py',
-     '../_base_/datasets/coco_detection.py',
-     '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
- ]
- # model settings
- model = dict(
-     pretrained='torchvision://resnet101',
-     backbone=dict(depth=101),
-     bbox_head=dict(
-         _delete_=True,
-         type='SABLRetinaHead',
-         num_classes=80,
-         in_channels=256,
-         stacked_convs=4,
-         feat_channels=256,
-         approx_anchor_generator=dict(
-             type='AnchorGenerator',
-             octave_base_scale=4,
-             scales_per_octave=3,
-             ratios=[0.5, 1.0, 2.0],
-             strides=[8, 16, 32, 64, 128]),
-         square_anchor_generator=dict(
-             type='AnchorGenerator',
-             ratios=[1.0],
-             scales=[4],
-             strides=[8, 16, 32, 64, 128]),
-         bbox_coder=dict(
-             type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
-         loss_cls=dict(
-             type='FocalLoss',
-             use_sigmoid=True,
-             gamma=2.0,
-             alpha=0.25,
-             loss_weight=1.0),
-         loss_bbox_cls=dict(
-             type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
-         loss_bbox_reg=dict(
-             type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
-     # training and testing settings
-     train_cfg=dict(
-         assigner=dict(
-             type='ApproxMaxIoUAssigner',
-             pos_iou_thr=0.5,
-             neg_iou_thr=0.4,
-             min_pos_iou=0.0,
-             ignore_iof_thr=-1),
-         allowed_border=-1,
-         pos_weight=-1,
-         debug=False))
- # optimizer
- optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
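For context, a config like the one above is not imported directly but is loaded and consumed through MMDetection's tooling. The snippet below is a sketch assuming the mmcv 1.x / mmdet 2.x APIs this repo targets; the config path is simply the repo-relative location of the file shown above.

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py')
# In this config style, train_cfg lives inside cfg.model, so the explicit
# keyword arguments resolve to None here.
model = build_detector(cfg.model,
                       train_cfg=cfg.get('train_cfg'),
                       test_cfg=cfg.get('test_cfg'))
print(type(model).__name__)  # RetinaNet, with the SABL head swapped in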
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/retina_sepbn_head.py DELETED
@@ -1,113 +0,0 @@
- import torch.nn as nn
- from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
-
- from ..builder import HEADS
- from .anchor_head import AnchorHead
-
-
- @HEADS.register_module()
- class RetinaSepBNHead(AnchorHead):
-     """RetinaHead with separate BN.
-
-     In RetinaHead, conv/norm layers are shared across different FPN levels,
-     while in RetinaSepBNHead, conv layers are shared across different FPN
-     levels, but BN layers are separated.
-     """
-
-     def __init__(self,
-                  num_classes,
-                  num_ins,
-                  in_channels,
-                  stacked_convs=4,
-                  conv_cfg=None,
-                  norm_cfg=None,
-                  **kwargs):
-         self.stacked_convs = stacked_convs
-         self.conv_cfg = conv_cfg
-         self.norm_cfg = norm_cfg
-         self.num_ins = num_ins
-         super(RetinaSepBNHead, self).__init__(num_classes, in_channels,
-                                               **kwargs)
-
-     def _init_layers(self):
-         """Initialize layers of the head."""
-         self.relu = nn.ReLU(inplace=True)
-         self.cls_convs = nn.ModuleList()
-         self.reg_convs = nn.ModuleList()
-         for i in range(self.num_ins):
-             cls_convs = nn.ModuleList()
-             reg_convs = nn.ModuleList()
-             for i in range(self.stacked_convs):
-                 chn = self.in_channels if i == 0 else self.feat_channels
-                 cls_convs.append(
-                     ConvModule(
-                         chn,
-                         self.feat_channels,
-                         3,
-                         stride=1,
-                         padding=1,
-                         conv_cfg=self.conv_cfg,
-                         norm_cfg=self.norm_cfg))
-                 reg_convs.append(
-                     ConvModule(
-                         chn,
-                         self.feat_channels,
-                         3,
-                         stride=1,
-                         padding=1,
-                         conv_cfg=self.conv_cfg,
-                         norm_cfg=self.norm_cfg))
-             self.cls_convs.append(cls_convs)
-             self.reg_convs.append(reg_convs)
-         # Share the conv weights (but not the norm layers) across levels.
-         for i in range(self.stacked_convs):
-             for j in range(1, self.num_ins):
-                 self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
-                 self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
-         self.retina_cls = nn.Conv2d(
-             self.feat_channels,
-             self.num_anchors * self.cls_out_channels,
-             3,
-             padding=1)
-         self.retina_reg = nn.Conv2d(
-             self.feat_channels, self.num_anchors * 4, 3, padding=1)
-
-     def init_weights(self):
-         """Initialize weights of the head."""
-         for m in self.cls_convs[0]:
-             normal_init(m.conv, std=0.01)
-         for m in self.reg_convs[0]:
-             normal_init(m.conv, std=0.01)
-         bias_cls = bias_init_with_prob(0.01)
-         normal_init(self.retina_cls, std=0.01, bias=bias_cls)
-         normal_init(self.retina_reg, std=0.01)
-
-     def forward(self, feats):
-         """Forward features from the upstream network.
-
-         Args:
-             feats (tuple[Tensor]): Features from the upstream network, each is
-                 a 4D-tensor.
-
-         Returns:
-             tuple: Usually a tuple of classification scores and bbox prediction
-                 cls_scores (list[Tensor]): Classification scores for all scale
-                     levels, each is a 4D-tensor, the channels number is
-                     num_anchors * num_classes.
-                 bbox_preds (list[Tensor]): Box energies / deltas for all scale
-                     levels, each is a 4D-tensor, the channels number is
-                     num_anchors * 4.
-         """
-         cls_scores = []
-         bbox_preds = []
-         for i, x in enumerate(feats):
-             cls_feat = feats[i]
-             reg_feat = feats[i]
-             for cls_conv in self.cls_convs[i]:
-                 cls_feat = cls_conv(cls_feat)
-             for reg_conv in self.reg_convs[i]:
-                 reg_feat = reg_conv(reg_feat)
-             cls_score = self.retina_cls(cls_feat)
-             bbox_pred = self.retina_reg(reg_feat)
-             cls_scores.append(cls_score)
-             bbox_preds.append(bbox_pred)
-         return cls_scores, bbox_preds
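The docstring's "shared conv, separate BN" idea can be demonstrated without mmdet. The toy sketch below (all names invented) reuses one `nn.Conv2d` instance across several per-level blocks, so the convolution weights are shared while each level keeps its own `BatchNorm2d` statistics, mirroring what `_init_layers()` achieves by reassigning `.conv` across levels.

import torch
import torch.nn as nn

shared_conv = nn.Conv2d(256, 256, 3, padding=1)
levels = nn.ModuleList(
    nn.Sequential(shared_conv, nn.BatchNorm2d(256), nn.ReLU(inplace=True))
    for _ in range(5)  # five FPN levels: five BN layers, one conv weight
)

feats = [torch.randn(2, 256, s, s) for s in (64, 32, 16, 8, 4)]
outs = [levels[i](x) for i, x in enumerate(feats)]
print([tuple(o.shape) for o in outs])  # spatial sizes preserved per level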
spaces/ArtificialArtist007/Rate-my-Aiart/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Rate My Aiart
- emoji: 🔥
- colorFrom: blue
- colorTo: pink
- sdk: gradio
- sdk_version: 3.19.1
- app_file: app.py
- pinned: false
- license: other
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/compat.py DELETED
@@ -1,63 +0,0 @@
- """Stuff that differs in different Python versions and platform
- distributions."""
-
- import logging
- import os
- import sys
-
- __all__ = ["get_path_uid", "stdlib_pkgs", "WINDOWS"]
-
-
- logger = logging.getLogger(__name__)
-
-
- def has_tls() -> bool:
-     try:
-         import _ssl  # noqa: F401  # ignore unused
-
-         return True
-     except ImportError:
-         pass
-
-     from pip._vendor.urllib3.util import IS_PYOPENSSL
-
-     return IS_PYOPENSSL
-
-
- def get_path_uid(path: str) -> int:
-     """
-     Return path's uid.
-
-     Does not follow symlinks:
-         https://github.com/pypa/pip/pull/935#discussion_r5307003
-
-     Placed this function in compat due to differences on AIX and
-     Jython, that should eventually go away.
-
-     :raises OSError: When path is a symlink or can't be read.
-     """
-     if hasattr(os, "O_NOFOLLOW"):
-         fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
-         file_uid = os.fstat(fd).st_uid
-         os.close(fd)
-     else:  # AIX and Jython
-         # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW
-         if not os.path.islink(path):
-             # older versions of Jython don't have `os.fstat`
-             file_uid = os.stat(path).st_uid
-         else:
-             # raise OSError for parity with os.O_NOFOLLOW above
-             raise OSError(f"{path} is a symlink; Will not return uid for symlinks")
-     return file_uid
-
-
- # packages in the stdlib that may have installation metadata, but should not be
- # considered 'installed'. this theoretically could be determined based on
- # dist.location (py27:`sysconfig.get_paths()['stdlib']`,
- # py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
- # make this ineffective, so hard-coding
- stdlib_pkgs = {"python", "wsgiref", "argparse"}
-
-
- # windows detection, covers cpython and ironpython
- WINDOWS = sys.platform.startswith("win") or (sys.platform == "cli" and os.name == "nt")
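A quick sketch of the symlink-refusing behaviour documented in `get_path_uid()`. This assumes a POSIX system where `os.O_NOFOLLOW` exists, and it reaches into pip's internals, which is fine for illustration but not a supported public API; the `.lnk` path is invented for the demo.

import os
import tempfile

from pip._internal.utils.compat import get_path_uid

with tempfile.NamedTemporaryFile() as f:
    # For a regular file we own, the uid matches our own uid.
    assert get_path_uid(f.name) == os.getuid()

link = f.name + ".lnk"              # hypothetical path for the demo
os.symlink("/etc/hostname", link)   # dangling targets are fine for symlinks
try:
    get_path_uid(link)              # O_NOFOLLOW makes os.open raise here
except OSError as exc:
    print("refused:", exc)
finally:
    os.remove(link)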
spaces/Awesimo/jojogan/e4e/datasets/images_dataset.py DELETED
@@ -1,33 +0,0 @@
- from torch.utils.data import Dataset
- from PIL import Image
- from utils import data_utils
-
-
- class ImagesDataset(Dataset):
-
-     def __init__(self, source_root, target_root, opts, target_transform=None, source_transform=None):
-         self.source_paths = sorted(data_utils.make_dataset(source_root))
-         self.target_paths = sorted(data_utils.make_dataset(target_root))
-         self.source_transform = source_transform
-         self.target_transform = target_transform
-         self.opts = opts
-
-     def __len__(self):
-         return len(self.source_paths)
-
-     def __getitem__(self, index):
-         from_path = self.source_paths[index]
-         from_im = Image.open(from_path)
-         from_im = from_im.convert('RGB')
-
-         to_path = self.target_paths[index]
-         to_im = Image.open(to_path).convert('RGB')
-         if self.target_transform:
-             to_im = self.target_transform(to_im)
-
-         if self.source_transform:
-             from_im = self.source_transform(from_im)
-         else:
-             from_im = to_im
-
-         return from_im, to_im
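A sketch of how this dataset is typically wired up. The directory paths are placeholders, `opts` is reduced to `None` (the class only stores it), and the import assumes the e4e repo layout shown in the file path above.

from torch.utils.data import DataLoader
from torchvision import transforms

from datasets.images_dataset import ImagesDataset  # repo-local module

tf = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
])
dataset = ImagesDataset(source_root="data/source", target_root="data/target",
                        opts=None, target_transform=tf, source_transform=tf)
loader = DataLoader(dataset, batch_size=4, shuffle=True)

from_im, to_im = next(iter(loader))  # two aligned RGB batches
print(from_im.shape, to_im.shape)    # e.g. torch.Size([4, 3, 256, 256]) twice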
spaces/Benson/text-generation/Examples/9no Amanecer Rpg Mod Apk.md DELETED
@@ -1,84 +0,0 @@
- <br />
- <h1>9th Dawn RPG Mod APK: A Guide to the Ultimate Adventure</h1>
- <p>Are you looking for a fun, immersive role-playing game that will keep you hooked for hours? Do you want to experience a vast, open world full of mysteries, dangers and adventures? If you answered yes, then you should try 9th Dawn RPG Mod APK, a modified version of the popular game 9th Dawn RPG by Valorware. In this article, we will tell you everything you need to know about this remarkable game, including its features, story, mod benefits, download instructions, and tips and tricks. Read on and get ready for the ultimate adventure!</p>
- <h2>9th dawn rpg mod apk</h2><br /><p><b><b>Download File</b> &raquo;&raquo;&raquo; <a href="https://bltlly.com/2v6LjG">https://bltlly.com/2v6LjG</a></b></p><br /><br />
- <h2>What is 9th Dawn RPG?</h2>
- <p>9th Dawn RPG is a classic role-playing game released in 2012 by Valorware, an independent game developer. The game is set on the island continent of Montelorne, a land far removed from the mainland but full of mystery, danger and adventure. You play as a hero who arrives in Montelorne to explore its secrets and face its challenges. You can choose between three different classes: warrior, mage or archer, and customize your character's appearance, skills and equipment. You can also interact with various NPCs, join factions, complete quests, collect items, craft weapons and armor, learn spells, fight enemies and bosses, and much more. The game has a retro pixel-art style that gives it a nostalgic charm, and a dynamic day-night cycle that affects gameplay. It also has a huge open world that you can explore freely, with more than 300 maps to discover.</p>
- <h3>Features of 9th Dawn RPG</h3>
- <p>Some of the features that make 9th Dawn RPG stand out are:</p>
- <ul>
- <li>A large and diverse world with more than 300 maps to explore, including forests, caves, dungeons, towns, castles, islands and more.</li>
- <li>A rich and engaging story with multiple endings depending on your choices and actions.</li>
- <li>A dynamic day-night cycle that affects the environment, NPCs, enemies and quests.</li>
-
- <li>A deep combat system that lets you use melee weapons, ranged weapons, shields, spells, potions, traps and more.</li>
- <li>A character customization system that lets you choose between three classes (warrior, mage or archer), select your gender and appearance, distribute your attributes (strength, agility, intelligence) and learn skills and spells.</li>
- <li>An equipment system that lets you collect and craft various items such as weapons, armor, accessories, consumables, etc.</li>
- <li>An inventory system that lets you manage your items and equip them on your character.</li>
- <li>A faction system that lets you join one of the four factions in Montelorne: The Order of the Lion (the royal army), The Shadow Knights (the rebels), The Arcane Society (the mages), or The Brotherhood (the thieves).</li>
- <li>A quest system that lets you accept and complete various tasks from NPCs or factions.</li>
- <li>A dialogue system that lets you interact with NPCs and choose your responses.</li>
- <li>A save system that lets you save your progress at any time.</li>
- </ul>
- <h3>Story and setting of 9th Dawn RPG</h3>
- <p>The story of 9th Dawn RPG takes place on the island continent of Montelorne, a land that was once part of a great empire called Esteria. However, due to a cataclysmic event known as the Great War, Montelorne was cut off from the mainland and plunged into chaos. The empire collapsed, and four factions emerged to compete for power and influence: The Order of the Lion, The Shadow Knights, The Arcane Society, and The Brotherhood. You are a hero who arrives in Montelorne to explore its secrets and face its challenges. You can choose to align yourself with one of the factions, or remain neutral and forge your own destiny. Your actions and choices will shape the fate of Montelorne and its people.</p>
- <h2>What is 9th Dawn RPG Mod APK?</h2>
-
- <h3>Benefits of 9th Dawn RPG Mod APK</h3>
- <p>Some of the benefits you can enjoy by using 9th Dawn RPG Mod APK are:</p>
- <ul>
- <li>Unlimited money: You can get unlimited coins and gems that you can use to buy items, upgrade your equipment, learn skills and spells, etc.</li>
- <li>Unlocked items: You can access every item in the game, including weapons, armor, accessories, consumables, etc., without having to collect or craft them.</li>
- <li>Unlocked maps: You can explore every map in the game, including the hidden ones, without having to unlock them by completing quests or finding keys.</li>
- <li>No ads: You can play the game without interruptions or distractions from annoying advertisements.</li>
- </ul>
- <h3>How to download and install 9th Dawn RPG Mod APK</h3>
- <p>To download and install 9th Dawn RPG Mod APK, follow these steps:</p>
- <ol>
- <li>Go to a reliable website that offers 9th Dawn RPG Mod APK as a free download. For example, you can use this link: .</li>
- <li>Click the download button and wait for the file to download to your device.</li>
- <li>Once the file has downloaded, go to your device settings and enable the option to install apps from unknown sources. This will allow you to install mod APKs that are not from the Google Play Store.</li>
- <li>Locate the downloaded file on your device and tap it to start the installation process.</li>
- <li>Follow the on-screen instructions and wait for the installation to complete.</li>
- <li>Launch the game and enjoy!</li>
- </ol>
- <h2>Tips and tricks for playing 9th Dawn RPG Mod APK</h2>
- <p>Now that you have downloaded and installed 9th Dawn RPG Mod APK, you are ready to begin your adventure in Montelorne. Here are some tips and tricks to help you get the most out of your gaming experience:</p>
- <p></p>
- <h3>Explore the world of Montelorne</h3>
-
- <h3>Customize your character and equipment</h3>
- <p>Another great thing about 9th Dawn RPG is its character customization system, which lets you create your own unique hero. You can choose between three classes: warrior, mage or archer, and select your gender and appearance. You can also distribute your attributes (strength, agility, intelligence) and learn skills and spells that suit your play style. You can likewise collect and craft various items such as weapons, armor, accessories, consumables, etc., and equip them on your character. You can find items by exploring the world, completing quests, defeating enemies, opening chests, and so on. You can also craft items using materials and recipes that you find or buy, upgrade your equipment using gems that you find or buy, and enchant it using scrolls that you find or buy. You can customize your character and equipment at any time from the menu.</p>
- <h3>Learn skills and spells</h3>
- <p>Skills and spells are special abilities you can use in combat or exploration. They can help you deal more damage, heal yourself or your allies, buff yourself or them, debuff enemies, escape danger, and so on. You can learn skills and spells by leveling up your character, joining factions, completing quests, finding books, etc. You can also improve your skills and spells by spending the skill points you earn from leveling up. You can access your skills and spells by tapping the icons in the bottom-right corner of the screen, and you can assign them to quick slots for easy access. You use skills and spells by tapping their icons or pressing the corresponding buttons on your device. Keep in mind, however, that skills and spells consume stamina or mana, indicated by the blue and green bars in the top-left corner of the screen. You have to wait for them to regenerate before you can use them again.</p>
- <h3>Fight enemies and bosses</h3>
-
- <h3>Join factions and quests</h3>
- <p>Factions and quests are optional but rewarding aspects of 9th Dawn RPG. Factions are groups of NPCs with their own goals, beliefs and agendas. You can join one of the four factions in Montelorne: The Order of the Lion, The Shadow Knights, The Arcane Society, or The Brotherhood. Each faction has its own leader, headquarters, members, allies, enemies and reputation. You can raise your reputation with a faction by completing quests, helping members, donating items, and so on. You can also lower your reputation with a faction by attacking members, stealing items, betraying allies, etc. Your reputation with a faction affects how its members treat you, which quests they offer you, what rewards they give you, and so on. You can also switch factions at any time by talking to the faction leader or by using a special item. Be careful when joining or leaving factions, though, as you may lose some benefits or gain some enemies. Quests are tasks you can accept and complete for NPCs or factions. They can involve various objectives such as killing enemies, finding items, delivering messages, escorting allies, solving puzzles, etc. They can also have different difficulties, rewards, time limits, consequences, and so on. You can find quests by talking to NPCs, visiting locations, reading notices, etc. You can also track your active quests from the menu. You complete quests by fulfilling their objectives and returning to the quest giver; you can also fail quests by ignoring the objectives, running out of time, killing the quest giver, etc. Quests can help you earn experience, money, items, reputation, skills, spells, and so on. They can also help you advance the story or unlock new areas.</p>
- <h2>Conclusion</h2>
-
- <h3>Summary of the article</h3>
- <p>In this article, we have covered the following topics:</p>
- <ul>
- <li>What is 9th Dawn RPG?</li>
- <li>What is 9th Dawn RPG Mod APK?</li>
- <li>Benefits of 9th Dawn RPG Mod APK</li>
- <li>How to download and install 9th Dawn RPG Mod APK</li>
- <li>Tips and tricks for playing 9th Dawn RPG Mod APK</li>
- </ul>
- <h3>Frequently asked questions</h3>
- <p>Here are some frequently asked questions about 9th Dawn RPG Mod APK:</p>
- <ol>
- <li>Is 9th Dawn RPG Mod APK safe to use?</li>
- <p>9th Dawn RPG Mod APK is generally safe to use if you download it from a trusted source and scan it with antivirus software before installing it. However, keep in mind that mod APKs are not authorized by the game's original developer and may contain bugs or errors that can affect the performance of the game or your device. You should also be careful when granting permissions to mod APKs, as they may access your personal data or device functions without your consent.</p>
- <li>Is 9th Dawn RPG Mod APK compatible with my device?</li>
- <p>9th Dawn RPG Mod APK is compatible with most Android devices running Android 4.0 or higher with at least 1 GB of RAM and 100 MB of free storage space. However, you should check the mod APK's specifications and requirements before downloading and installing it to make sure it runs properly on your device.</p>
- <li>Can I play 9th Dawn RPG Mod APK online or offline?</li>
- <p>9th Dawn RPG Mod APK is primarily an offline game that does not require an internet connection to play. However, you may need an internet connection to download and install the mod APK, to access some online features such as leaderboards or achievements, or to update the mod APK to the latest version.</p>
- <li>Can I play 9th Dawn RPG Mod APK with my friends?</li>
-
- <li>Can I transfer my progress from 9th Dawn RPG to 9th Dawn RPG Mod APK, or vice versa?</li>
- <p>No, you cannot transfer your progress from 9th Dawn RPG to 9th Dawn RPG Mod APK or vice versa. The mod APK has a different file structure and data format than the original game, and the two are not compatible with each other. If you want to switch between the two versions, you will have to start from scratch.</p>
- </ol></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Benson/text-generation/Examples/Android Stalker.md DELETED
@@ -1,58 +0,0 @@
1
- <br />
2
- <h1>Android acosador: Un término con múltiples significados</h1>
3
- <p>Cuando escuchas el término acosador androide, ¿qué te viene a la mente? ¿Es una aplicación maliciosa que espía las actividades de tu teléfono? ¿Es una serie de videojuegos que te sumerge en un mundo post-apocalíptico? ¿O es un personaje de televisión que desafía tu percepción de la humanidad? En este artículo, exploraremos estos diferentes significados de stalker android y cómo se relacionan entre sí. </p>
4
- <h2>android stalker</h2><br /><p><b><b>DOWNLOAD</b> &ndash;&ndash;&ndash; <a href="https://bltlly.com/2v6JLg">https://bltlly.com/2v6JLg</a></b></p><br /><br />
5
- <h2>Android acosador como un tipo de malware</h2>
6
- <p>Uno de los significados más comunes y perturbadores de stalker android es un tipo de malware que rastrea o monitorea secretamente la actividad de su dispositivo. Estas aplicaciones también se conocen como stalkerware o spyware, y a menudo son instaladas por alguien que quiere espiarte sin tu consentimiento, como un compañero abusivo, un ex o un hacker. Las aplicaciones de stalkerware pueden acceder a tu ubicación, conversaciones, fotos, contraseñas y más, y enviarlas a un tercero. También pueden encender el micrófono o la cámara de forma remota para ver y escuchar lo que está sucediendo a su alrededor. </p>
7
- <p>Las aplicaciones de stalkerware representan serias amenazas para su privacidad, seguridad y seguridad. Pueden exponer su información personal al robo de identidad, chantaje, acoso o violencia. También pueden comprometer el rendimiento de su dispositivo, la duración de la batería y el uso de datos. Además, pueden violar su confianza y dignidad como ser humano. </p>
8
- <p>¿Cómo se puede saber si hay una aplicación stalkerware en su dispositivo Android? Según los expertos en seguridad, algunos signos que pueden indicar stalkerware incluyen:</p>
9
- <ul>
10
- <li>El abusador ha tenido acceso físico a su dispositivo</li>
11
- <li>El abusador sabe mucha información específica sobre usted que no debería</li>
12
- <li>La batería del dispositivo se agota más rápido de lo habitual</li>
13
- <li> Hay un aumento inexplicable en el uso de datos</li>
14
- <li>Hay cambios inesperados en la configuración del dispositivo</li>
15
- </ul>
16
- <p>Si sospecha que hay una aplicación stalkerware en su dispositivo, aquí hay algunos pasos que puede tomar:</p>
17
- <ul>
18
-
19
- <li>Compruebe si su dispositivo ha sido "arraigado" o "jailbreak". Esto significa que alguien ha ganado el control total sobre el sistema operativo de su dispositivo. Puedes usar aplicaciones como Root Checker o Certo para analizar tu dispositivo en busca de rooteo o jailbreak. </li>
20
- <li>Escanea tu dispositivo con software antivirus o anti-malware. Algunas aplicaciones como MalwareBytes, NortonLifeLock o Lookout pueden detectar stalkerware y eliminarlo. </li>
21
- <li>Cambia tus contraseñas para todas tus cuentas. Usa contraseñas fuertes y únicas que sean difíciles de adivinar. </li>
22
- <li>Restablecimiento de fábrica del dispositivo. Esto borrará todos los datos y aplicaciones de su dispositivo y lo restaurará a su configuración original. Asegúrese de hacer una copia de seguridad de sus datos importantes antes de hacer esto. </li>
23
- </ul>
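A minimal editorial sketch of one such check (not from the original article): stalkerware often registers itself as a device administrator so that it cannot be uninstalled from the normal app list. Assuming the code runs inside an Android app with a valid `Context`, this Kotlin snippet lists the active device-admin components so that unfamiliar entries can be reviewed.

```kotlin
import android.app.admin.DevicePolicyManager
import android.content.Context

// Lists active device administrators, one heuristic for spotting
// stalkerware, which often registers as a device admin so that it
// cannot be uninstalled through the normal app list.
fun listDeviceAdmins(context: Context): List<String> {
    val dpm = context.getSystemService(Context.DEVICE_POLICY_SERVICE)
            as DevicePolicyManager
    // getActiveAdmins() returns null when no admins are registered.
    return dpm.activeAdmins?.map { it.flattenToString() } ?: emptyList()
}
```

Any entry you do not recognize can then be deactivated in the system settings (typically under Security, then Device admin apps) before uninstalling the offending package.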
- ## Android stalker as a video game series
- Another meaning of android stalker is a video game series that immerses you in a post-apocalyptic world. The series is called S.T.A.L.K.E.R., which stands for Scavengers, Trespassers, Adventurers, Loners, Killers, Explorers and Robbers: the people who venture into the Zone, an area around the Chernobyl Nuclear Power Plant struck by a second nuclear disaster in 2006. The Zone is full of dangers, such as mutated creatures, hostile factions and anomalous phenomena, but it also offers opportunities: valuable artifacts, secrets and mysteries.
- The S.T.A.L.K.E.R. series consists of three main games: Shadow of Chernobyl (2007), Clear Sky (2008) and Call of Pripyat (2009). Each game has a different protagonist and story, but they all share the same setting and gameplay elements. Some of the main features and themes are:
- * Exploration: the games let you roam freely through the open world of the Zone, discovering new places, quests, characters and events. You can also interact with the environment, such as picking up items, using vehicles or triggering traps.
- * Anomalies: strange and often deadly phenomena that defy the laws of physics. Anomalies can burn, electrocute or teleport you; you can use detectors or bolts to locate and avoid them.
- * Factions: groups of stalkers with different goals and ideologies. Factions can be friendly, neutral or hostile to you depending on your actions and reputation. You can join or ally with some factions, or fight them.
- The series has been praised for its atmospheric, immersive gameplay, its realistic and dynamic AI, its rich and detailed world-building, and its non-linear, emergent storytelling. It has also faced challenges and controversies: bugs and technical problems, legal disputes over intellectual-property rights, censorship in some countries, and fan dissatisfaction with some aspects of the games. Despite these difficulties, the series has earned a loyal cult following over the years.
- ## Android stalker as a TV character
- The third meaning of android stalker is a TV character who challenges your perception of humanity. The character is Dorian, the android partner of a human police officer in Almost Human, a science-fiction drama that aired in 2013-2014. The show is set in 2048, when crime has risen by 400% and every human police officer is paired with an android partner. It follows the cases and adventures of John Kennex (Karl Urban), a detective who lost his leg and his memory in a raid gone wrong, and Dorian (Michael Ealy), an android model decommissioned for being too emotional and unpredictable.
- The premise and plot of Almost Human resemble other science-fiction works that explore the relationship between humans and androids, such as Blade Runner, I, Robot, or Detroit: Become Human. The show adds its own twists and innovations, however, with futuristic gadgets, crimes and technologies; for example, it features cases involving sexbots, memory manipulation, genetic engineering and artificial intelligence.
- Dorian is one of the most interesting and engaging aspects of the show. He is an android with a synthetic soul, which gives him a personality, a sense of humor and a moral compass. He is also loyal, compassionate and curious about human emotions and experiences. He often acts as a foil and a friend to Kennex, who is cynical, traumatized and distrustful of androids. Together they form an unlikely but effective alliance that challenges the stereotypes and prejudices of their society.
- Almost Human received mostly positive reviews from critics and audiences, who praised its cast, visuals, action and humor. The show also ran into problems, including low ratings, scheduling issues, episodes aired out of order, and cancellation after one season. Many fans were disappointed by the abrupt ending and the unresolved questions, but the show still has a cult following and potential for a revival or reboot.
- ## Conclusion
- In this article we explored three different meanings of android stalker: a type of malware that spies on your device activity, a video game series that immerses you in a post-apocalyptic world, and a TV character who challenges your perception of humanity. Each meaning touches on different aspects of technology, society and culture, and along the way we covered facts, tips and opinions about each.
- ### FAQ
- * **What is stalkerware?** Stalkerware is a type of malware that secretly tracks or monitors your device activity without your consent. It can access your location, conversations, photos, passwords and more.
- * **What is S.T.A.L.K.E.R.?** A video game series that immerses you in a post-apocalyptic world around the Chernobyl Nuclear Power Plant. You play as a stalker who explores the Zone, an area full of dangers and opportunities.
- * **What is Almost Human?** A science-fiction drama that aired in 2013-2014. It is set in 2048, when every human police officer is paired with an android partner, and follows the cases and adventures of John Kennex and Dorian, an unlikely but effective duo.
- * **How can I remove stalkerware from my Android device?** Scan your device with antivirus or anti-malware software, change the passwords for all your accounts, or factory-reset the device.
- * **How can I play the S.T.A.L.K.E.R. games?** You can play them on PC or Xbox 360. You can also download mods or fan-made versions of the games for extra features and content.
spaces/Benson/text-generation/Examples/Apk De La Saga Del Verano.md DELETED
@@ -1,59 +0,0 @@
- # Download World Empire 2027 APK and Lead Your Country to Glory
- Do you have what it takes to be a supreme leader in a world of chaos? Do you want a realistic, immersive turn-based strategy game that lets you choose from more than 180 countries and lead them to victory or defeat? If so, you should download World Empire 2027 APK, a game that will test your leadership skills and strategic thinking.
- World Empire 2027 is developed by iGindis Games, a company that specializes in games simulating real-world scenarios and events. The game is set in the year 2027, when the world is in crisis: economic collapse, political instability, social unrest and environmental disasters. You are the leader of one of the countries, and your decisions affect your nation and the world. You can use diplomacy, war, technology, economy and espionage to build your empire and compete with other players online, or locally in multiplayer mode.
- ## apk de la saga del verano
- **Download:** <https://bltlly.com/2v6Lw7>
- In this article we show you how to download World Empire 2027 APK on Android devices and Windows PCs, and we highlight some of the game's features and tips. So, without further ado, let's get started!
- ## How to download World Empire 2027 APK on Android devices
- To download World Empire 2027 APK on an Android device, follow these simple steps (a checksum sketch follows the list):
- 1. Go to the official World Empire 2027 website at <https://www.igindis.com/> or to Uptodown at <https://world-empire-2027.en.uptodown.com/android> and find the download button for the game.
- 2. Click the download button and wait for the APK file to download to your device. The file is about 70 MB, so make sure you have enough space and a good Internet connection.
- 3. Locate the APK file on your device and tap it to install. Follow the on-screen instructions and accept the permissions the game requests.
- 4. Once installation is complete, launch the game and enjoy playing World Empire 2027 APK on your Android device.
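One safeguard worth adding whenever you sideload an APK, offered here as an editorial sketch rather than a step from the original guide: compare the download's SHA-256 checksum against one published by the developer before installing. The file name and expected hash below are hypothetical placeholders.

```kotlin
import java.io.File
import java.security.MessageDigest

// Computes the SHA-256 checksum of a downloaded file so it can be
// compared against a checksum published by the developer.
fun sha256Of(file: File): String {
    val digest = MessageDigest.getInstance("SHA-256")
    file.inputStream().use { input ->
        val buffer = ByteArray(8192)
        var read = input.read(buffer)
        while (read != -1) {
            digest.update(buffer, 0, read)
            read = input.read(buffer)
        }
    }
    return digest.digest().joinToString("") { "%02x".format(it) }
}

fun main() {
    // Hypothetical file name and hash; substitute the real values.
    val apk = File("world-empire-2027.apk")
    val expected = "0000000000000000000000000000000000000000000000000000000000000000"
    println(if (sha256Of(apk) == expected) "Checksum OK" else "Mismatch: do not install")
}
```

If the checksums do not match, the file was corrupted in transit or tampered with, and it should not be installed.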
- ## How to download World Empire 2027 APK on Windows PC
- To download World Empire 2027 APK on a Windows PC, you need an Android emulator that can run Android apps on your computer. Many emulators are available online, but we recommend BlueStacks or NoxPlayer, as they are easy to use and compatible with most games. Here are the steps:
- 1. Download an Android emulator such as BlueStacks or NoxPlayer from its official website at <https://www.bluestacks.com/> or <https://www.bignox.com/> respectively.
- 2. Install the emulator and launch it on your PC. You may need to sign in with your Google account to access the Google Play Store.
- 3. Go to the Google Play Store or Uptodown and search for World Empire 2027. You can also use the links given above for Android devices.
- 4. Click the install button and wait for the game to install in your emulator. Installation may take a while depending on your PC's specifications and Internet speed.
- 5. Once installation is complete, launch the game from your emulator and enjoy World Empire 2027 APK on your Windows PC.
- ## Features of World Empire 2027 APK
- World Empire 2027 APK offers many features that make it fun and engaging. Here are some of the features you can expect:
- * **Use diplomacy, war, technology, economy and espionage to build your empire.** You can interact with other countries in different ways, such as forming alliances, declaring war, sending aid or imposing sanctions. You can use your espionage network to gather intelligence or sabotage your enemies, research new technologies that give you an edge in war or in the economy, and manage your budget and resources wisely, investing in sectors such as education, health and infrastructure.
- * **Compete with other players online or locally in multiplayer mode.** You can play World Empire 2027 APK with players from around the world or with your friends locally. You can join or create a room with up to 8 players and choose settings such as map size, difficulty level and turn time. You can chat with other players and cooperate or compete with them, or play against the AI in single-player mode if you prefer.
- ## Tips and tricks for playing World Empire 2027 APK
- World Empire 2027 APK is a game that requires strategy and planning to succeed. Here are some tips and tricks that can help you improve your game and win more wars:
- * **Keep an eye on the world news and events that affect your country and its relations.** The game presents a realistic simulation of the world situation and of events that can change the course of history. You receive news updates and alerts about current affairs around the globe, and you can see how other countries react to these events and how they affect your relations with them. Pay attention to this news and adjust your strategy accordingly.
- * **Form alliances with other countries and use your spies to gather intelligence or sabotage your enemies.** Diplomacy is another key aspect of the game that can help you reach your goals or prevent conflicts. You can ally with countries that share your interests or ideology and cooperate through trade, aid and military support. You can also use your spies to gather information about other countries' plans, strengths and weaknesses, or to sabotage their economy, military and technology. Be careful not to get caught by their counter-intelligence, as that can damage your reputation and relations.
- ## Conclusion
- World Empire 2027 APK is an exciting, challenging game that lets you lead your country through a futuristic scenario in which the world is in chaos. You can choose from 180 countries and use diplomacy, war, technology, economy and espionage to build your empire and compete with other players online or locally in multiplayer mode. The game presents a realistic simulation of the world situation and the events that can change the course of history, and it offers many features that make it fun and engaging, such as customization, research, news and chat.
- If you are a fan of strategy war games or want to test your leadership skills and strategic thinking, you should download World Empire 2027 APK on your Android device or Windows PC. The game is free to download and play, but it contains in-app purchases that can enhance your gaming experience. You can download it from the official website or Uptodown, or from the Google Play Store if you have an emulator on your PC.
- So, what are you waiting for? Download World Empire 2027 APK today and lead your country to glory!
- ### FAQ
- Here are some of the most frequently asked questions about World Empire 2027 APK:
- 1. **Is World Empire 2027 APK free?** Yes, World Empire 2027 APK is free to download and play, but it contains in-app purchases that can enhance your gaming experience.
- 2. **How can I update World Empire 2027 APK?** You can update it by visiting the official website or Uptodown and downloading the latest version of the game. Alternatively, you can update it from the Google Play Store if you installed it from there.
- 3. **What are the system requirements for World Empire 2027 APK?** It requires Android version 4.4 or higher on Android devices, and Windows XP or higher on Windows PCs. The game also needs at least 2 GB of RAM and a stable Internet connection.
- 4. **Can I play World Empire 2027 APK offline?** No, it requires an Internet connection to play, since it is a multiplayer game that relies on real-time data and events. You can, however, play in single-player mode against the AI if you wish.
- 5. **How can I contact the developers of World Empire 2027 APK?** You can email [email protected] or visit their website at https://www.igindis.com/. You can also follow them on Facebook, Twitter, Instagram, YouTube and Discord for updates and news.
spaces/Benson/text-generation/Examples/Cuerda Hroe Vice Ciudad 6.5 Descarga.md DELETED
@@ -1,71 +0,0 @@
- # World of Warships Blitz War Download: How to Enjoy Naval Combat on Your Mobile Device
- Do you love naval warfare and history? Do you want to experience the thrill of commanding a warship in epic battles? Do you want to play a fun, engaging game on your mobile device? If you answered yes to any of these questions, you should try **World of Warships Blitz War**, a free-to-play action game that brings World War II naval combat to mobile devices and tablets.
- ## cuerda héroe vice ciudad 6.5 descarga
- **Download:** <https://bltlly.com/2v6Klk>
- ## What is World of Warships Blitz War?
- ### A free action game that brings WWII naval combat to phones and tablets
- World of Warships Blitz War is developed by Wargaming, the same company behind the popular World of Tanks and World of Warplanes. It is based on the award-winning online multiplayer PC version of World of Warships, optimized for mobile devices. You control realistic, historically accurate warships from different nations and eras, such as Japan, the US, the USSR, the UK, Germany, Italy and France, and fight in online and offline multiplayer naval battles against other players or AI enemies.
- ### It features more than 130 iconic warships from different nations and eras
- World of Warships Blitz War offers an unmatched collection of authentic historical ships alongside fantasy, sci-fi and fictional naval machines. You can choose from four warship classes: battleships, cruisers, destroyers and aircraft carriers. Each class has its own characteristics, strengths and weaknesses. Battleships are powerful and durable but slow and vulnerable to torpedoes; cruisers are versatile and agile but more lightly armored; destroyers are fast and stealthy but have low health; carriers are support units that launch planes to scout, attack or defend.
- ### It offers fast, action-packed 7v7 battles and strategic gameplay
- ## How to download and install World of Warships Blitz War
- ### Available for iOS and Android devices
- World of Warships Blitz War is available for iOS and Android devices, so you can enjoy naval combat on your smartphone or tablet. The game is free to download and play on all platforms, but it may contain in-app purchases for some premium items and features. You can download it from the App Store or the Google Play Store, depending on your device.
- ### It requires at least 3 GB of free space and a stable Internet connection
- Before downloading and installing World of Warships Blitz War, make sure you have enough free space on your device. The game needs at least 3 GB of free space to run smoothly, and it may download additional data during installation. You also need a stable Internet connection to play online, since it is a multiplayer game that connects you with players around the world.
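As a small editorial sketch, not part of the original guide: before starting a large download you can verify programmatically that the target location has enough room. On the JVM, `File.getUsableSpace()` reports the bytes available to the application; the 3 GB figure below matches the free-space requirement quoted in this guide.

```kotlin
import java.io.File

// Checks that a download location has at least the space the game needs.
fun hasRoomFor(downloadDir: File, requiredBytes: Long): Boolean =
    downloadDir.usableSpace >= requiredBytes

fun main() {
    val dir = File(System.getProperty("user.home"))
    val requiredGb = 3L // free-space requirement quoted in this guide
    val ok = hasRoomFor(dir, requiredGb * 1024 * 1024 * 1024)
    println(if (ok) "Enough free space" else "Need at least $requiredGb GB free")
}
```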
- ### Steps to download and install the game from official sources
- To download and install World of Warships Blitz War from official sources, follow these simple steps:
- 1. Go to the App Store or the Google Play Store on your device and search for World of Warships Blitz War.
- 2. Tap the game icon, then tap the Install or Get button to start downloading the game.
- 3. Wait for the download to finish, then tap the Open or Play button to launch the game.
- 4. Follow the on-screen instructions to create your account, choose your server and complete the tutorial.
- 5. Enjoy the game!
- ## How to play World of Warships Blitz War
- ### Choose your preferred warship from four classes: battleships, cruisers, destroyers and aircraft carriers
- * Battleships: the heaviest, most powerful warships in the game. They have thick armor, big guns and high health, and can deal massive damage with their main and secondary batteries. They are also slow, clumsy and vulnerable to torpedoes and planes. They are best suited to long-range combat and tanking damage for the team.
- * Cruisers: the most versatile and agile warships in the game. They have medium armor, fast-firing guns and good speed, and can fill several roles in battle: scouting, support, flanking or hunting. They can also use special consumables such as sonar, radar, smoke screens or hydroacoustic search. They have less health than battleships, however, and can be easily penetrated by their shells. They are best suited to mid-range combat and adapting to different situations.
- * Destroyers: the smallest, fastest warships in the game. They have thin armor, fast-firing guns and high stealth. They can launch torpedoes at enemy ships from a safe distance, ambush from behind islands or smoke screens, and capture bases faster than other classes. They have very low health, though, and can be destroyed by a few hits from any ship. They are best suited to close-range combat and harassing enemies.
- * Aircraft carriers: the game's support units. They have weak armor, no guns and low speed. They launch planes from their deck to scout, attack or defend, controlling up to three squadrons at a time: fighters, dive bombers or torpedo bombers. They have very limited maneuverability and depend heavily on their planes. They are best suited to long-range combat and providing air support.
- ### Customize your warship with modules, upgrades and camouflages
- Once you have chosen your warship class and nation, you can customize your ship with various modules, upgrades and camouflages. Modules are parts of your warship that affect its performance, such as the hull, engine, guns, torpedoes and planes; you research and buy new modules with the experience and credits earned in battle. Upgrades improve your warship's attributes, such as survivability, firepower, maneuverability and concealment; you can buy and install up to six per warship with credits. Camouflages are cosmetic items that change your warship's appearance and also grant bonuses, such as a reduced detection range or increased experience gain; you can buy permanent or temporary camouflages with credits or gold.
- ### Join a battle and control your warship with simple touch controls
- When you are ready to join a battle, tap the Battle button in the main menu and choose a mode. You will be matched with players of similar skill and tier. The game loads the map and the teams, and you see your warship in the port view, where you can check your consumables and signals and chat with teammates. To start the battle, tap the Ready button.
- Once the battle begins, you see your warship in the 3D view and control it with simple touch controls. Use the virtual joystick on the left to steer your warship and adjust its speed, and the buttons on the right to fire your main guns or launch torpedoes or planes. The buttons along the bottom let you switch views, zoom in and out, activate consumables and open the mini-map. You can also swipe the screen to look around and aim at your enemies.
- ### Cooperate with your allies, spot your enemies, and use your weapons and skills to win the battle
- To win the battle, cooperate with your allies, spot your enemies, and use your weapons and skills effectively. Communicate with your team through chat or quick commands. Search for enemy ships with your sight, radar, sonar or planes. Aim at your enemies' weak points and dodge their fire. Use your consumables at the right time and in the right situation. Adapt to the shifting tide of battle and make smart decisions.
- ## How to improve your skills and progress in World of Warships Blitz War
- ### Learn the strengths and weaknesses of each warship class and nation
- To improve your skills and progress in World of Warships Blitz War, learn the strengths and weaknesses of each warship class and nation. Know what role each class plays in battle and how to counter it. Know which nation has which advantages and disadvantages in firepower, armor, speed, stealth and so on. Know which modules, upgrades and camouflages suit each warship best. You can find useful information and tips on the game's official website, wiki, forums and YouTube channels.
- ### Study the maps and use the terrain to your advantage
- ### Complete missions, challenges and events to earn rewards and unlock new warships
- A third way to improve your skills and progress is to complete missions, challenges and events to earn rewards and unlock new warships. You can access various missions and challenges from the main menu, such as daily missions, weekly missions and campaign missions, and take part in events that offer special rewards, such as seasonal events, historical events or special battles. Completing these tasks earns you experience, credits, gold, containers, blueprints, tokens and more, which you can use to research and buy new warships or other items.
- ### Join a fleet or create your own to chat, play and compete with other players
- A fourth way to improve is to join a fleet or create your own to chat, play and compete with other players. A fleet is a group of players who share a common name, tag, logo and chat channel. You can join an existing fleet or create your own by inviting your friends or other players. As a fleet member you can chat with other members, play together in divisions or clan battles, exchange gifts or resources, and earn fleet points and rewards. You can also compete with other fleets in the fleet rankings or in tournaments.
- ## Conclusion
- If you want to enjoy naval combat on your mobile device, you should download and install World of Warships Blitz War today. You can find the game on the App Store or the Google Play Store, or visit the official website for more information. You can also follow the game on social media or join the community forums to stay up to date and interact with other players. World of Warships Blitz War is a game that will keep you hooked for hours and make you feel like a true naval commander.
- ## FAQ
- ### Q: How can I get more gold in World of Warships Blitz War?
- A: Gold is the premium currency, used to buy premium items and features such as premium ships, premium accounts and containers. You can get more gold by completing certain missions or challenges, taking part in special events or offers, watching ads, or buying it with real money.
- ### Q: How can I get more blueprints in World of Warships Blitz War?
- A: Blueprints are special items used to research new warships or upgrade existing ones. You can get more by opening containers, completing missions or challenges, taking part in special events or offers, or buying them with gold.
- ### Q: How can I change my server in World of Warships Blitz War?
- A: You can change your server by tapping the settings icon in the main menu and then the server option. You can choose from four servers: North America, Europe, Asia or CIS. Changing your server resets your progress, however, and you will have to start from scratch.
- ### Q: How can I report a bug or an issue in World of Warships Blitz War?
- ### Q: How can I join a fleet or create my own in World of Warships Blitz War?
- A: You can join a fleet or create your own by tapping the fleet icon in the main menu and then the search or create option. You can browse existing fleets or create your own by setting a name, tag, logo, description and so on. You can also invite your friends or other players to join your fleet.
spaces/Benson/text-generation/Examples/Descargar El Certificado Del Consejo De Abogados De La India.md DELETED
@@ -1,111 +0,0 @@
- # How to download the Bar Council of India certificate?
- ## descargar el certificado del consejo de abogados de la India
- **Download:** <https://bltlly.com/2v6JCt>
- In this article we cover:
- * What is the purpose of the certificate, and what are its benefits?
- * What are the eligibility criteria and the steps to apply for the certificate?
- * How do you download the certificate from the BCI website or app?
- * What are some common problems with the certificate, and their solutions?
- * How do you renew and verify the certificate?
- I hope you find this article useful and informative. Let's get started!
- ## Purpose and benefits of the certificate
- The main purposes of issuing a certificate of practice to advocates are to:
- * Ensure that non-practising advocates, and advocates who move to another profession or business, are shifted to the roll of non-practising advocates.
- * Give the State Bar Councils, as well as other bodies elected under the Advocates Act, proper oversight.
- * Maintain direct communication and contact with practising advocates.
- * Ensure that all candidates enrolled after 2010 sit for, and pass, the All India Bar Examination (AIBE).
- * Ensure that the benefits offered to advocates are enjoyed only by practising advocates.
- Some of the benefits of holding a certificate of practice are:
- * It allows an advocate to practise law before any court or authority in India.
- * It enhances an advocate's credibility and reputation among clients, peers and judges.
- * It entitles an advocate to the various welfare schemes, insurance plans, pension funds and similar provisions offered by the BCI or the State Bar Councils.
- * It helps an advocate stay up to date with legal developments, rules, regulations and judgments through BCI newsletters, journals, seminars, workshops and so on.
- * It allows an advocate to take part in BCI elections, committees and sub-committees, and to contribute to policy-making and the governance of the legal profession.
- ## Eligibility criteria and steps to apply for the certificate
- The eligibility criteria for applying for a certificate of practice are:
- * The applicant must be a citizen of India.
- * The applicant must hold a law degree (3-year/5-year) from a recognized law institute approved by the BCI.
- * The applicant must be enrolled with a State Bar Council as an advocate.
- * The applicant must have passed the AIBE conducted by the BCI within two years of enrolment.
- * The applicant must pay an application fee of Rs. 600/- to the BCI along with the application form and other documents.
- The steps to apply for a certificate of practice are:
- 1. Download the application form from the BCI website or obtain it from your State Bar Council office.
- 2. Fill in the details, such as name, address, enrolment number, date of enrolment, AIBE roll number and date of passing the AIBE.
- 3. Attach the following documents to the application form:
-    * A copy of the enrolment certificate issued by the State Bar Council.
-    * A copy of the AIBE pass certificate issued by the BCI.
-    * A copy of the law degree or provisional certificate.
-    * A copy of an identity proof such as an Aadhaar card, PAN card or voter ID card.
-    * Two passport-size photographs.
-    * A demand draft of Rs. 600/- in favour of the Bar Council of India, payable at New Delhi.
- 4. Submit the application form, documents and fee at the State Bar Council office, or post them to the BCI office in New Delhi.
- 5. Wait for the BCI to verify and approve the application. Processing can take up to 30 days.
- 6. Once approved, collect the certificate of practice from the State Bar Council office or receive it by post from the BCI.
- ## How to download the certificate from the BCI website or app?
- 1. Visit the BCI website at <https://www.barcouncilofindia.org/> or download the BCI app from the Google Play Store or the Apple App Store.
- 2. Log in with your enrolment number and password. If you do not have an account, register with your details and create a password.
- 3. Go to the "Certificate of Practice" section and click "Download Certificate".
- 4. Select the year and month the certificate was issued and enter your enrolment number.
- 5. Click "Submit" and download the PDF file of your certificate.
- 6. You can also print or share the certificate as needed.
- ## Common problems and solutions for the certificate
- Some common problems advocates face with the certificate of practice are:
- * The certificate is lost, damaged or stolen.
- * The certificate is not received within 30 days of the application.
- * The certificate contains errors or discrepancies in the name, address, enrolment number, etc.
- * The certificate is not accepted by some courts or authorities as valid proof of practice.
- Some solutions to these problems are:
- * If the certificate is lost, damaged or stolen, an advocate can apply for a duplicate by paying a fee of Rs. 1000/- to the BCI and submitting an affidavit stating the reason for the loss or damage, along with a copy of the FIR in case of theft. The duplicate certificate is issued within 15 days of the application.
- * If the certificate is not received within 30 days of the application, an advocate can contact the State Bar Council office or the BCI office and ask about the status of the application. The status can also be tracked online through the BCI website or app by entering the enrolment number and date of birth.
- * If the certificate is not accepted by some courts, tribunals or authorities as valid proof of practice, an advocate can appeal to the BCI or the State Bar Councils and request their intervention and clarification. The advocate can also present other documents, such as an ID card, enrolment card or AIBE pass card, to prove eligibility to practise law in India.
- ## How to renew and verify the certificate?
- The certificate of practice is valid for five years from the date of issue. An advocate has to renew the certificate before it expires by paying a renewal fee of Rs. 600/- to the BCI and submitting a renewal application form along with a copy of the existing certificate. The renewed certificate is issued within 15 days of the application. An advocate can also verify a certificate online through the BCI website or app by entering the enrolment number and certificate number. Verification shows the certificate's details, such as name, address, date of issue and date of expiry, and whether the certificate is valid, expired, suspended or cancelled.
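Since the validity window drives the renewal deadline, here is a small editorial sketch (not from the original article) that computes the deadline from the issue date using the five-year validity stated above. The sample date is hypothetical.

```kotlin
import java.time.LocalDate

// The guide states that a certificate of practice is valid for five
// years from the date of issue; this computes the renewal deadline.
fun renewalDeadline(issueDate: LocalDate): LocalDate = issueDate.plusYears(5)

fun main() {
    // Hypothetical issue date for illustration.
    println(renewalDeadline(LocalDate.of(2021, 6, 15))) // prints 2026-06-15
}
```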
- ## Conclusion
- In this article I have explained how to download the Bar Council of India certificate, the document that certifies an advocate is eligible to practise law in India. I have also explained the purpose and benefits of the certificate, the eligibility criteria and the steps to apply for it, common problems and their solutions, and how to renew and verify it. I hope you found this article useful and informative. If you have any questions or comments, feel free to contact me. Thank you for reading!
- ### FAQ
- **What is the difference between the enrolment certificate and the certificate of practice?**
- **How can I check the status of my certificate of practice application?**
- You can check the status online through the BCI website or app by entering your enrolment number and date of birth. You can also contact the State Bar Council office or the BCI office and ask about the status of your application.
- **How can I change my address or other details on my certificate of practice?**
- You can change your address or other details by applying to the BCI for a correction and paying a fee of Rs. 500/-. You have to submit a request letter along with supporting documents such as an identity proof, the enrolment certificate and the AIBE pass certificate. The corrected certificate is issued within 15 days of the application.
- **What are the consequences of not holding a valid certificate of practice?**
- If you do not hold a valid certificate of practice, you may face the following consequences:
- * You may not be allowed to appear before any court, tribunal or authority as an advocate.
- * You may not be entitled to the welfare schemes, insurance plans, pension funds and similar provisions offered by the BCI or the State Bar Councils.
- * You may not be able to take part in BCI elections, committees and sub-committees, or contribute to policy-making and the governance of the legal profession.
- * You may be liable to disciplinary action by the BCI or the State Bar Councils for professional misconduct.
- **Where can I get more information about the certificate of practice?**
- You can get more information from the following sources:
- * The BCI website at <https://www.barcouncilofindia.org/>
- * The BCI app, available on the Google Play Store or the Apple App Store.
- * Your State Bar Council office.
- * The BCI office at 21 Rouse Avenue Institutional Area, ITO, Near Bal Bhawan, New Delhi - 110002.
- * The BCI email address, [email protected].
spaces/Benson/text-generation/Examples/Descargar Genshin Impacto Paso En Un Vasto.md DELETED
@@ -1,232 +0,0 @@
1
-
2
- <h1>Descargar Genshin Impact: Paso a un Vasto Mundo Mágico de Aventura</h1>
3
- <p>¿Alguna vez has soñado con explorar un vasto mundo abierto lleno de maravillas, misterios y magia? ¿Quieres embarcarte en una aventura épica para encontrar a tu hermano perdido y descubrir los secretos de una tierra gobernada por poderosos dioses? ¿Quieres experimentar un sistema de combate lleno de acción que te permite liberar poderes elementales y cambiar entre diferentes personajes? Si respondiste sí a cualquiera de estas preguntas, entonces deberías descargar Genshin Impact, uno de los mejores juegos gratuitos jamás creados. </p>
4
- <p>Genshin Impact es un juego de rol de acción de mundo abierto que te quitará el aliento con sus impresionantes gráficos, banda sonora inmersiva, una historia atractiva y una jugabilidad diversa. En este juego, puedes explorar siete naciones inspiradas en diferentes culturas y mitologías, conocer un colorido elenco de personajes con personalidades y habilidades únicas, y luchar contra enemigos formidables con tus amigos o en solitario. También puede personalizar su partido con más de 40 personajes de diferentes elementos y tipos de armas, así como actualizar su equipo y habilidades para adaptarse a su estilo de juego. </p>
5
- <h2>descargar genshin impacto paso en un vasto</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://bltlly.com/2v6ISq">https://bltlly.com/2v6ISq</a></b></p><br /><br />
6
- <p>Si tienes curiosidad sobre este increíble juego y quieres saber más sobre él, sigue leyendo este artículo. Te contaremos todo lo que necesitas saber sobre Genshin Impact, desde cómo descargarlo en diferentes plataformas, cómo jugarlo eficazmente, cómo progresar sin problemas y cómo mejorar tu experiencia en él. Al final de este artículo, estarás listo para entrar en un vasto mundo mágico de aventura. </p>
7
- <h2>¿Qué es el impacto de Genshin? </h2>
8
-
9
- <p>El mundo del juego está dividido en siete regiones, cada una correspondiente a uno de los siete elementos: Anemo (viento), Geo (tierra), Pyro (fuego), Hydro (agua), Cryo (hielo), Electro (rayo), y Dendro (naturaleza). Cada región tiene su propia cultura, historia, monumentos, vida silvestre y clima. El jugador puede explorar el mundo libremente utilizando varios métodos de travesía, como caminar, escalar, nadar, planear y montar a caballo. El jugador también puede interactuar con varios objetos y PNJ en el mundo, como abrir cofres, recoger artículos, cocinar alimentos, fabricar armas, completar misiones y unirse a eventos. </p>
10
- <p>El sistema de combate del juego se basa en el uso de habilidades y reacciones elementales. El jugador puede elegir hasta cuatro personajes para formar un grupo, cada uno con su propio elemento y tipo de arma. El jugador puede cambiar entre personajes en cualquier momento durante el combate, y utilizar sus habilidades para hacer daño y crear reacciones elementales. Las reacciones elementales son efectos especiales que ocurren cuando dos elementos diferentes entran en contacto, como la quema, congelación, electrocutación o explosión. Estos efectos pueden proporcionar varias ventajas o desventajas en combate, dependiendo de la situación. </p>
11
- <h2>Cómo descargar Genshin impacto en diferentes plataformas</h2>
12
- <p>Genshin Impact está disponible para su descarga en Windows PC, dispositivos Android, dispositivos iOS, PlayStation 4 y PlayStation 5. El juego es gratuito para descargar y jugar en todas las plataformas, pero requiere una conexión a Internet y una cuenta miHoYo para acceder. Estos son los pasos para descargar e instalar Genshin Impact en diferentes plataformas:</p>
13
- <h3>Cómo descargar Genshin impacto en Windows PC</h3>
14
- <ol>
15
- <li>Ir al sitio web oficial de Genshin Impact en <a href="">https://genshin.mihoyo.com/en</a>. </li>
16
- <li>Haga clic en el icono "Windows" en la esquina superior derecha de la página. </li>
17
- <li>Haga clic en el botón "Descargar ahora" y guarde el archivo en su ubicación preferida. </li>
18
-
19
- <li>Abre el lanzador del juego e inicia sesión con tu cuenta miHoYo o crea uno si no tienes uno. </li>
20
- <li>Haga clic en el botón "Obtener juego" y espere a que el juego se descargue e instale. </li>
21
- <li>Haga clic en el botón "Lanzar" y disfrute del juego. </li>
22
- </ol>
23
- <h4>Requisitos del sistema de PC</h4>
24
- <tabla>
25
- <tr>
26
- <th>Requisitos mínimos</th>
27
- <th>Requisitos recomendados</th>
28
- </tr>
29
- <tr>
30
- <td>OS: Windows 7 SP1 de 64 bits o superior</td>
31
- <td>OS: Windows 10 64-bit</td>
32
- </tr>
33
- <tr>
34
- <td>CPU: Intel Core i5 o equivalente</td>
35
- <td>CPU: Intel Core i7 o equivalente</td>
36
- </tr>
37
- <tr>
38
- <td>RAM: 8 GB</td>
39
- <td>RAM: 16 GB</td>
40
- </tr>
41
- <tr>
42
- <td>GPU: NVIDIA GeForce GT 1030 o superior</td>
43
- <td>GPU: NVIDIA GeForce RTX 1060 6 GB o superior</td>
44
- </tr>
45
- <tr>
46
- <td>DirectX: Versión 11</td>
47
- <td>DirectX: Versión 11</td>
48
- </tr>
49
- <tr>
50
- <td>Almacenamiento: 30 GB de espacio disponible</td>
51
- <td>Almacenamiento: 30 GB de espacio disponible</td>
52
- </tr>
53
- <tr>
54
- <td>Tarjeta de sonido: tarjeta de sonido compatible con DirectX o chipset a bordo</td>
55
- <td>Tarjeta de sonido: tarjeta de sonido compatible con DirectX o chipset a bordo</td>
56
- </tr>
57
- </tabla>
58
- <h3>Cómo descargar Genshin impacto en dispositivos Android</h3>
59
- <ol>
60
- <li>Ir a la aplicación Google Play Store en su dispositivo y buscar "Impacto Genshin". </li>
61
- <li> Seleccione el juego de los resultados de búsqueda y toque en el "Instalar" botón. </li>
62
- <li>Espera a que el juego se descargue e instale en tu dispositivo. </li>
63
- <li>Abra la aplicación del juego e inicie sesión con su cuenta miHoYo o cree una si no tiene una. </li>
64
- <li>Siga las instrucciones para descargar datos adicionales y comenzar el juego. </li>
65
- <li>Disfruta del juego. </li>
66
- </ol>
67
- <h4>Requisitos del sistema móvil</h4> <h4>Requisitos del sistema móvil</h4>
68
- <tabla>
69
- <tr>
70
- <th>Dispositivos soportados</th>
71
- <th>Tamaño del archivo</th>
72
- </tr>
73
- <tr>
74
- <td>iOS 9.0 y superior, iPhone 8 Plus y superior, iPad Air 3 y superior, iPad mini 5 y superior, iPad Pro y superior</td>
75
- <td>Acerca de 9 GB</td>
76
- </tr>
77
- </tabla>
78
- <h3>Cómo descargar Genshin Impact en PlayStation 4 y PlayStation 5</h3>
79
- <ol>
80
-
81
- <li>Seleccione el juego de los resultados de búsqueda y haga clic en el botón "Descargar". </li>
82
- <li>Espera a que el juego se descargue e instale en tu consola. </li>
83
- <li>Abra la aplicación del juego e inicie sesión con su cuenta miHoYo o cree una si no tiene una. </li>
84
- <li>Comienza el juego y disfrútalo. </li>
85
- </ol>
86
- <h2>Cómo jugar Genshin impacto</h2>
87
- <p>Ahora que ha descargado Genshin Impact en su plataforma preferida, usted está listo para jugar. Sin embargo, antes de sumergirte en el juego, es posible que quieras aprender algunos consejos y trucos básicos sobre cómo controlar a tu personaje, cambiar entre los miembros del grupo, usar habilidades elementales e interactuar con el mundo. Estas son algunas de las cosas esenciales que necesitas saber para jugar Genshin Impact eficazmente:</p>
88
- <h3>Cómo controlar tu carácter</h3>
89
- <p>Los controles del juego varían dependiendo de la plataforma en la que estés jugando. Estos son los controles predeterminados para cada plataforma:</p>
90
- <h4>Controles de PC</h4>
91
- <tabla>
92
- <tr>
93
- <th>Acción</th>
94
- <th>Clave</th>
95
- </tr>
96
- <tr>
97
- <td>Mover</td>
98
- <td>WASD</td>
99
- </tr>
100
- <tr>
101
- <td>Saltar/Subir/Deslizarse</td>
102
- <td>Espacio</td>
103
- </tr>
104
- <tr>
105
- <td>Sprint/Nadar más rápido</td>
106
- <td>Shift</td>
107
- </tr>
108
- <tr>
109
- <td>Ataque/Confirmar/Recoger</td>
110
- <td>Botón izquierdo del ratón</td>
111
- </tr>
112
- <tr>
113
- <td>Objetivo/Cancelar/Volver</td>
114
- <td>Botón derecho del ratón</td>
115
- </tr>
116
- <tr>
117
- <td>Habilidad elemental/Interactuar/Hablar/Examinar/Abrir cofres/Revivir personajes/Cambiar objetivos (mientras apuntas)</td>
118
- <td>E</td>
119
- </tr>
120
- <tr>
121
- <td>Explosión elemental/ Uso de alimentos o medicamentos (mientras apunta)</td>
122
- <td>Q</td>
123
- </tr <tr>
124
- <td>Cambiar caracteres</td>
125
- <td>1/2/3/4</td>
126
- </tr>
127
- <tr>
128
- <td>Abrir inventario</td>
129
- <td>B</td>
130
- </tr>
131
- <tr>
132
- <td>Pantalla de caracteres abiertos</td>
133
- <td>C</td>
134
- </tr>
135
- <tr>
136
- <td>Abrir mapa</td>
137
- <td>M</td>
138
- </tr>
139
- <tr>
140
- <td>Menú de misiones abiertas</td>
141
- <td>J</td>
142
- </tr>
143
- <tr>
144
- <td>Abrir menú Paimon</td>
145
- <td>Esc</td>
146
- </tr>
147
- <tr>
148
- <td>Pausa/Reanudar (mientras apunta)</td>
149
- <td>P</td>
150
- </tr>
151
- <tr>
152
- <td>Mostrar el cursor (mientras apunta)</td>
153
- <td>Alt</td>
154
- </tr>
155
- <tr>
156
- <td>Toma una foto (mientras apuntas)</td>
157
-
158
- </tr>
159
- <tr>
160
- <td>Mostrar/Ocultar interfaz de usuario (mientras apunta)</td>
161
- <td>F6</td>
162
- </tr>
163
- </tabla>
164
- <p>También puede personalizar las combinaciones de teclas en el menú de configuración si prefiere un diseño diferente. </p>
165
- <p></p>
166
- <h4>Controles móviles</h4>
167
- <tabla>
168
- <tr>
169
- <th>Acción</th>
170
- <th>Control</th>
171
- </tr>
172
- <tr>
173
- <td>Mover</td>
174
- <td>Arrastre el joystick virtual en el lado izquierdo de la pantalla. </td>
175
- </tr>
176
- <tr>
177
- <td>Saltar/Subir/Deslizarse/Correr/Nadar más rápido/Atacar/Confirmar/Recoger/Apuntar/Cancelar/Retroceder/Habilidad elemental/Interactuar/Hablar/Examinar/Abrir cofres/Revivir personajes/Cambiar objetivos (mientras apuntas)/Usar Comida o Medicina (mientras apuntas</td>
178
- <td>Toca los botones correspondientes en el lado derecho de la pantalla. </td>
179
- </tr>
180
- <tr>
181
- <td>Elemental Burst/Switch Characters/Open Inventory/Open Character Screen/Open Map/Open Quest Menu/Open Paimon Menu/Pause/Resume (while aiming)/Show Cursor (while aiming)/Take Photo (while aiming)/Show/Hide UI (while aiming)</td>
182
- <td>Toca los iconos correspondientes en la parte superior o inferior de la pantalla. </td>
183
- </tr>
184
- </tabla>
185
- <p>También puede ajustar la sensibilidad y el diseño de los controles en el menú de configuración si desea cambiarlos. </p>
186
- <h4>Controles de PlayStation</h4>
187
- <tabla>
188
- <tr>
189
- <th>Acción</th>
190
- <th>Botón</th>
191
- </tr>
192
- <tr>
193
- <td>Mover/Cámara/Objetivo/Cambiar objetivos (mientras apunta)</td>
194
- <td>L3/R3 o Stick izquierdo/Stick derecho.</td>
195
- </tr>
196
- <tr>
197
- <td>Saltar/Subir/Deslizarse/Correr/Nadar más rápido/Atacar/Confirmar/Recoger/Apuntar/Cancelar/Retroceder/Habilidad elemental/Interactuar/Hablar/Examinar/Abrir cofres/Revivir personajes/Cambiar objetivos (mientras apuntas)/Usar Comida o Medicina (mientras apuntas</td>
198
- <td>X/O/Cuadrado/Triángulo o Cruz/Círculo/Cuadrado/Triángulo.</td>
199
- </tr>
200
- <tr>
201
- <td>Elemental Burst/Switch Characters/Open Inventory/Open Character Screen/Open Map/Open Quest Menu/Open Paimon Menu/Pause/Resume (while aiming)/Show Cursor (while aiming)/Take Photo (while aiming)/Show/Hide UI (while aiming)</td>
202
- <td>L1/R1/L2/R2/D-pad o parachoques izquierdo/parachoques derecho/gatillo izquierdo/gatillo derecho/D-pad. </td>
203
-
204
- <h2>Cómo progresar en el impacto de Genshin</h2>
205
- <p>Ahora que sabes cómo jugar Genshin Impact, es posible que te preguntes cómo progresar en el juego y desbloquear más contenido y características. El juego tiene muchas cosas que ofrecer, pero algunos de ellos están detrás de ciertos requisitos o niveles. Estos son algunos de los aspectos clave del sistema de progresión del juego y cómo conseguirlos:</p>
206
- <h3>Cómo subir de nivel a tu personaje, armas y artefactos</h3>
207
- <p>Una de las cosas más importantes que hacer en Genshin Impact es subir de nivel a tu personaje, armas y artefactos. Estas son las principales fuentes de tus estadísticas y habilidades, y determinarán qué tan bien puedes manejar los desafíos y los enemigos en el juego. Aquí hay algunos consejos sobre cómo subir de nivel a tu personaje, armas y artefactos:</p>
208
- <ul>
209
- <li>Para subir de nivel a tu personaje, necesitas usar materiales de Character EXP, como Wanderer’s Advice, Adventurer’s Experience y Hero’s Wit. Puedes obtener estos materiales de varias fuentes, como completar misiones, abrir cofres, derrotar enemigos y participar en eventos. También puede usar otros caracteres como materiales EXP, pero esto no es recomendable ya que consumirá sus recursos y limitará sus opciones. </li>
210
- <li>Para subir de nivel sus armas, es necesario utilizar materiales de mejora de armas, tales como Mineral de mejora, Mineral de mejora fina y Mineral de mejora mística. Usted puede obtener estos materiales de varias fuentes, tales como la elaboración de ellos en un herrero, la minería de ellos de los depósitos de mineral, apertura de cofres, derrotar a los enemigos, y participar en eventos. También puede utilizar otras armas como materiales de mejora, pero esto no se recomienda, ya que consumirá sus recursos y limitar sus opciones. </li>
211
- <li>To level up your artifacts, you need to use other artifacts as enhancement material. You can get artifacts from various sources, such as opening chests, completing domains, defeating bosses and enemies, and participating in events.</li>
212
- <li>To raise the maximum level of your character, weapons, and artifacts, you need to ascend them using specific materials. You can get these materials from various sources, such as exploring the world, completing domains, defeating bosses, and buying them from shops. You can ascend your character at levels 20, 40, 50, 60, 70, and 80; your weapons at levels 20, 40, 50, 60, 70, and 80; and your artifacts at levels 4 and 8.</li>
213
- </ul>
214
- <h3>How to Unlock New Regions, Quests, and Features</h3>
215
- <p>Another important thing to do in Genshin Impact is to unlock new regions, quests, and features. These are the main sources of your content and enjoyment, and they provide more opportunities and rewards in the game. Here are some tips on how to unlock them:</p>
216
- <ul>
217
- <li>To unlock new regions, you need to raise your Adventure Rank (AR). This is a measure of your overall progress and experience in the game, and it determines which regions, quests, and features are available to you. You can raise your AR by completing quests, opening chests, discovering locations, activating teleport waypoints and statues, and participating in events.</li>
218
- <li>To unlock new quests, you need to meet certain requirements or conditions. These vary depending on the type and difficulty of the quest. Some quests are story-related and unlock automatically as you progress through the game. Some are world-related and unlock as you explore the world or interact with NPCs. Some are domain-related and unlock when you complete domains or reach certain AR levels.</li>
219
- <li>To unlock new features, you need to complete certain quests or reach certain AR levels. These features include co-op mode, the Spiral Abyss, the reputation system, the housing system, the fishing system, and more. They enhance your gameplay experience and provide more options and rewards in the game.</li>
220
- </ul>
221
- <h2>How to Enhance Your Experience in Genshin Impact</h2>
222
- <p>Genshin Impact is already a great game that offers plenty of fun and excitement, but there are still ways to improve your experience even further. Here are some suggestions on how to get the most out of your adventure in Teyvat:</p>
223
- <h3>Cross-Platform Play</h3>
224
- <p>One of the best features of Genshin Impact is its cross-platform play. This means you can play with your friends on different devices, such as PC, mobile, and PlayStation, and you can switch between devices without losing your progress or data. The game also has an offline mode that lets you play without an internet connection for a limited time. You can activate offline mode by opening the Paimon menu and selecting "Settings," then "Other," then "Network." However, you will not be able to access some of the game's features and content in offline mode, such as co-op mode, events, mail, and updates.</p>
225
- <h3>Is Genshin Impact cross-play?</h3>
226
- <p>Yes, Genshin Impact supports cross-play, which means you can play with your friends on different devices, such as PC, mobile, and PlayStation. You can also switch between devices without losing your progress or data. However, you and your friends need to be playing on the same server and have reached Adventure Rank 16 or higher to access co-op mode.</p>
227
- <h3>Is Genshin Impact pay-to-win?</h3>
228
-
229
- <h3>Is Genshin Impact safe?</h3>
230
- <p>Yes, Genshin Impact is safe: it does not contain harmful or malicious content or software that could damage your device or compromise your personal information. The game is developed by a reputable company, miHoYo, which has been making successful and popular games for years. It is also verified and certified by various platforms and authorities, such as the Google Play Store, the App Store, the PlayStation Store, the ESRB, PEGI, and CERO, and it has a privacy policy and terms of service that protect your rights and interests as a user. You should still play responsibly online, however: do not share your account or personal information with anyone, do not click on suspicious links or ads, do not download unofficial or unauthorized software or mods, and do not engage in illegal or unethical activities.</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BetterAPI/BetterChat_new/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: BetterChat - AI for everyone
3
- sdk: docker
4
- emoji: ⚡
5
- colorTo: blue
6
- pinned: true
7
- license: mit
8
- colorFrom: gray
9
- duplicated_from: BetterAPI/BetterChat
10
- ---
11
-
12
-
13
- ### BetterChat
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/BigDL-Nano_inference/data.py DELETED
@@ -1,233 +0,0 @@
1
- # This file is copied from https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/data.py
2
-
3
- # MIT License
4
-
5
- # Copyright (c) 2022 Lorenzo Breschi
6
-
7
- # Permission is hereby granted, free of charge, to any person obtaining a copy
8
- # of this software and associated documentation files (the "Software"), to deal
9
- # in the Software without restriction, including without limitation the rights
10
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
- # copies of the Software, and to permit persons to whom the Software is
12
- # furnished to do so, subject to the following conditions:
13
-
14
- # The above copyright notice and this permission notice shall be included in all
15
- # copies or substantial portions of the Software.
16
-
17
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23
- # SOFTWARE.
24
-
25
- from typing import Callable, Dict
26
-
27
- import torch
28
-
29
- from torch.utils.data import Dataset
30
-
31
- import torchvision.transforms.functional as F
32
- from torchvision import transforms
33
- import pytorch_lightning as pl
34
-
35
- from collections.abc import Iterable
36
-
37
-
38
- # image reader writer
39
- from pathlib import Path
40
- from PIL import Image
41
- from typing import Tuple
42
-
43
-
44
- def read_image(filepath: Path, mode: str = None) -> Image.Image:
45
- with open(filepath, 'rb') as file:
46
- image = Image.open(file)
47
- return image.convert(mode)
48
-
49
-
50
- image2tensor = transforms.ToTensor()
51
- tensor2image = transforms.ToPILImage()
52
-
53
-
54
- def write_image(image: Image, filepath: Path):
55
- filepath.parent.mkdir(parents=True, exist_ok=True)
56
- image.save(str(filepath))
57
-
58
-
59
- def read_image_tensor(filepath: Path, mode: str = 'RGB') -> torch.Tensor:
60
- return image2tensor(read_image(filepath, mode))
61
-
62
-
63
- def write_image_tensor(input: torch.Tensor, filepath: Path):
64
- write_image(tensor2image(input), filepath)
65
-
66
-
67
- def get_valid_indices(H: int, W: int, patch_size: int, random_overlap: int = 0):
68
-
69
- vih = torch.arange(random_overlap, H-patch_size -
70
- random_overlap+1, patch_size)
71
- viw = torch.arange(random_overlap, W-patch_size -
72
- random_overlap+1, patch_size)
73
- if random_overlap > 0:
74
- rih = torch.randint_like(vih, -random_overlap, random_overlap)
75
- riw = torch.randint_like(viw, -random_overlap, random_overlap)
76
- vih += rih
77
- viw += riw
78
- vi = torch.stack(torch.meshgrid(vih, viw)).view(2, -1).t()
79
- return vi
80
-
81
-
82
- def cut_patches(input: torch.Tensor, indices: Tuple[Tuple[int, int]], patch_size: int, padding: int = 0):
83
- # TODO use slices to get all patches at the same time ?
84
-
85
- patches_l = []
86
- for n in range(len(indices)):
87
-
88
- patch = F.crop(input, *(indices[n]-padding),
89
- *(patch_size+padding*2,)*2)
90
- patches_l.append(patch)
91
- patches = torch.cat(patches_l, dim=0)
92
-
93
- return patches
94
-
95
-
96
- def prepare_data(data_path: Path, read_func: Callable = read_image_tensor) -> Dict:
97
- """
98
- Takes a data_path of a folder which contains subfolders with input, target, etc.
99
- labelled with the same file names.
100
- :param data_path: Path of the folder containing data
101
- :param read_func: function that reads data and returns a tensor
102
- """
103
- data_dict = {}
104
-
105
- subdir_names = ["target", "input", "mask"] # ,"helper"
106
-
107
- # checks only files for which there is an target
108
- # TODO check for images
109
- name_ls = [file.name for file in (
110
- data_path / "target").iterdir() if file.is_file()]
111
-
112
- subdirs = [data_path / sdn for sdn in subdir_names]
113
- for sd in subdirs:
114
- if sd.is_dir():
115
- data_ls = []
116
- files = [sd / name for name in name_ls]
117
- for file in files:
118
- tensor = read_func(file)
119
- H, W = tensor.shape[-2:]
120
- data_ls.append(tensor)
121
- # TODO check that all sizes match
122
- data_dict[sd.name] = torch.stack(data_ls, dim=0)
123
-
124
- data_dict['name'] = name_ls
125
- data_dict['len'] = len(data_dict['name'])
126
- data_dict['H'] = H
127
- data_dict['W'] = W
128
- return data_dict
129
-
130
-
131
- # TODO an image is loaded whenever a patch is needed, this may be a bottleneck
132
- class DataDictLoader():
133
- def __init__(self, data_dict: Dict,
134
- batch_size: int = 16,
135
- max_length: int = 128,
136
- shuffle: bool = False):
137
- """
138
- """
139
-
140
- self.batch_size = batch_size
141
- self.shuffle = shuffle
142
-
145
- self.data_dict = data_dict
146
- self.dataset_len = data_dict['len']
147
- self.len = self.dataset_len if max_length is None else min(
148
- self.dataset_len, max_length)
149
- # Calculate # batches
150
- num_batches, remainder = divmod(self.len, self.batch_size)
151
- if remainder > 0:
152
- num_batches += 1
153
- self.num_batches = num_batches
154
-
155
- def __iter__(self):
156
- if self.shuffle:
157
- r = torch.randperm(self.dataset_len)
158
- self.data_dict = {k: v[r] if isinstance(
159
- v, Iterable) else v for k, v in self.data_dict.items()}
160
- self.i = 0
161
- return self
162
-
163
- def __next__(self):
164
- if self.i >= self.len:
165
- raise StopIteration
166
- batch = {k: v[self.i:self.i+self.batch_size]
167
- if isinstance(v, Iterable) else v for k, v in self.data_dict.items()}
168
-
169
- self.i += self.batch_size
170
- return batch
171
-
172
- def __len__(self):
173
- return self.num_batches
174
-
175
-
176
- class PatchDataModule(pl.LightningDataModule):
177
-
178
- def __init__(self, data_dict,
179
- patch_size: int = 2**5,
180
- batch_size: int = 2**4,
181
- patch_num: int = 2**6):
182
- super().__init__()
183
- self.data_dict = data_dict
184
- self.H, self.W = data_dict['H'], data_dict['W']
185
- self.len = data_dict['len']
186
-
187
- self.batch_size = batch_size
188
- self.patch_size = patch_size
189
- self.patch_num = patch_num
190
-
191
- def dataloader(self, data_dict, **kwargs):
192
- return DataDictLoader(data_dict, **kwargs)
193
-
194
- def train_dataloader(self):
195
- patches = self.cut_patches()
196
- return self.dataloader(patches, batch_size=self.batch_size, shuffle=True,
197
- max_length=self.patch_num)
198
-
199
- def val_dataloader(self):
200
- return self.dataloader(self.data_dict, batch_size=1)
201
-
202
- def test_dataloader(self):
203
- return self.dataloader(self.data_dict) # TODO batch size
204
-
205
- def cut_patches(self):
206
- # TODO cycle once
207
- patch_indices = get_valid_indices(
208
- self.H, self.W, self.patch_size, self.patch_size//4)
209
- dd = {k: cut_patches(
210
- v, patch_indices, self.patch_size) for k, v in self.data_dict.items()
211
- if isinstance(v, torch.Tensor)
212
- }
213
- threshold = 0.1
214
- mask_p = torch.mean(
215
- dd.get('mask', torch.ones_like(dd['input'])), dim=(-1, -2, -3))
216
- masked_idx = (mask_p > threshold).nonzero(as_tuple=True)[0]
217
- dd = {k: v[masked_idx] for k, v in dd.items()}
218
- dd['len'] = len(masked_idx)
219
- dd['H'], dd['W'] = (self.patch_size,)*2
220
-
221
- return dd
222
-
223
-
224
- class ImageDataset(Dataset):
225
- def __init__(self, file_paths: Iterable, read_func: Callable = read_image_tensor):
226
- self.file_paths = file_paths
- self.read_func = read_func  # store the reader so __getitem__ actually honors it
227
-
228
- def __getitem__(self, idx: int) -> Tuple[torch.Tensor, str]:
229
- file = self.file_paths[idx]
230
- return self.read_func(file), file.name
231
-
232
- def __len__(self) -> int:
233
- return len(self.file_paths)
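
A minimal usage sketch for the helpers above (not from the original repo; the `dataset_root` path and the patch/batch sizes are placeholder assumptions, and it presumes `target`/`input`/`mask` subfolders with matching file names):

from pathlib import Path

# Hypothetical layout: dataset_root/{target,input,mask}/<identical file names>
data_dict = prepare_data(Path("dataset_root"))

dm = PatchDataModule(data_dict, patch_size=32, batch_size=16, patch_num=64)
for batch in dm.train_dataloader():
    # Each batch is a dict; tensor entries have shape [B, C, 32, 32]
    print(batch["input"].shape, batch["target"].shape)
    break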
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/cmake/ThrustBuildCompilerTargets.cmake DELETED
@@ -1,150 +0,0 @@
1
- #
2
- # This file defines the `thrust_build_compiler_targets()` function, which
3
- # creates the following interface targets:
4
- #
5
- # thrust.compiler_interface
6
- # - Interface target providing compiler-specific options needed to build
7
- # Thrust's tests, examples, etc.
8
- #
9
- # thrust.promote_cudafe_warnings
10
- # - Interface target that adds warning promotion for NVCC cudafe invocations.
11
- # - Only exists to work around github issue #1174 on tbb.cuda configurations.
12
- # - May be combined with thrust.compiler_interface when #1174 is fully resolved.
13
-
14
- function(thrust_build_compiler_targets)
15
- set(cxx_compile_definitions)
16
- set(cxx_compile_options)
17
-
18
- thrust_update_system_found_flags()
19
-
20
- if (THRUST_TBB_FOUND)
21
- # There's a ton of these in the TBB backend, even though the code is correct.
22
- # TODO: silence these warnings in code instead
23
- append_option_if_available("-Wno-unused-parameter" cxx_compile_options)
24
- endif()
25
-
26
- if ("MSVC" STREQUAL "${CMAKE_CXX_COMPILER_ID}")
27
- # TODO Enable /Wall instead of W3
28
- append_option_if_available("/W3" cxx_compile_options)
29
-
30
- # Treat all warnings as errors:
31
- append_option_if_available("/WX" cxx_compile_options)
32
-
33
- # Disable loss-of-data conversion warnings.
34
- # TODO Re-enable.
35
- append_option_if_available("/wd4244" cxx_compile_options)
36
- append_option_if_available("/wd4267" cxx_compile_options)
37
-
38
- # Suppress numeric conversion-to-bool warnings.
39
- # TODO Re-enable.
40
- append_option_if_available("/wd4800" cxx_compile_options)
41
-
42
- # Disable warning about applying unary operator- to unsigned type.
43
- append_option_if_available("/wd4146" cxx_compile_options)
44
-
45
- # MSVC STL assumes that `allocator_traits`'s allocator will use raw pointers,
46
- # and the `__DECLSPEC_ALLOCATOR` macro causes issues with thrust's universal
47
- # allocators:
48
- # warning C4494: 'std::allocator_traits<_Alloc>::allocate' :
49
- # Ignoring __declspec(allocator) because the function return type is not
50
- # a pointer or reference
51
- # See https://github.com/microsoft/STL/issues/696
52
- append_option_if_available("/wd4494" cxx_compile_options)
53
-
54
- # Some of the async tests require /bigobj to fit all their sections into the
55
- # object files:
56
- append_option_if_available("/bigobj" cxx_compile_options)
57
-
58
- # "Oh right, this is Visual Studio."
59
- list(APPEND cxx_compile_definitions "NOMINMAX")
60
- else()
61
- append_option_if_available("-Werror" cxx_compile_options)
62
- append_option_if_available("-Wall" cxx_compile_options)
63
- append_option_if_available("-Wextra" cxx_compile_options)
64
- append_option_if_available("-Winit-self" cxx_compile_options)
65
- append_option_if_available("-Woverloaded-virtual" cxx_compile_options)
66
- append_option_if_available("-Wcast-qual" cxx_compile_options)
67
- append_option_if_available("-Wno-cast-align" cxx_compile_options)
68
- append_option_if_available("-Wno-long-long" cxx_compile_options)
69
- append_option_if_available("-Wno-variadic-macros" cxx_compile_options)
70
- append_option_if_available("-Wno-unused-function" cxx_compile_options)
71
- append_option_if_available("-Wno-unused-variable" cxx_compile_options)
72
- endif()
73
-
74
- if ("GNU" STREQUAL "${CMAKE_CXX_COMPILER_ID}")
75
- if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.5)
76
- # In GCC 4.4, the CUDA backend's kernel launch templates cause
77
- # impossible-to-decipher "'<anonymous>' is used uninitialized in this
78
- # function" warnings, so we disable uninitialized variable warnings.
79
- append_option_if_available("-Wno-uninitialized" cxx_compile_options)
80
- endif()
81
-
82
- if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 4.5)
83
- # This isn't available until GCC 4.3, and misfires on TMP code until
84
- # GCC 4.5.
85
- append_option_if_available("-Wlogical-op" cxx_compile_options)
86
- endif()
87
-
88
- if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 7.3)
89
- # GCC 7.3 complains about name mangling changes due to `noexcept`
90
- # becoming part of the type system; we don't care.
91
- append_option_if_available("-Wno-noexcept-type" cxx_compile_options)
92
- endif()
93
- endif()
94
-
95
- if (("Clang" STREQUAL "${CMAKE_CXX_COMPILER_ID}") OR
96
- ("XL" STREQUAL "${CMAKE_CXX_COMPILER_ID}"))
97
- # xlC and Clang warn about unused parameters in uninstantiated templates.
98
- # This causes xlC to choke on the OMP backend, which is mostly #ifdef'd out
99
- # (and thus has unused parameters) when you aren't using it.
100
- append_option_if_available("-Wno-unused-parameters" cxx_compile_options)
101
- endif()
102
-
103
- if ("Clang" STREQUAL "${CMAKE_CXX_COMPILER_ID}")
104
- # -Wunneeded-internal-declaration misfires in the unit test framework
105
- # on older versions of Clang.
106
- append_option_if_available("-Wno-unneeded-internal-declaration" cxx_compile_options)
107
- endif()
108
-
109
- if ("Feta" STREQUAL "${CMAKE_CUDA_COMPILER_ID}")
110
- # Today:
111
- # * NVCC accepts CUDA C++ in .cu files but not .cpp files.
112
- # * Feta accepts CUDA C++ in .cpp files but not .cu files.
113
- # TODO: This won't be necessary in the future.
114
- list(APPEND cxx_compile_options -cppsuffix=cu)
115
- endif()
116
-
117
- add_library(thrust.compiler_interface INTERFACE)
118
-
119
- foreach (cxx_option IN LISTS cxx_compile_options)
120
- target_compile_options(thrust.compiler_interface INTERFACE
121
- $<$<COMPILE_LANGUAGE:CXX>:${cxx_option}>
122
- $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:Feta>>:${cxx_option}>
123
- # Only use -Xcompiler with NVCC, not Feta.
124
- #
125
- # CMake can't split genexs, so this can't be formatted better :(
126
- # This is:
127
- # if (using CUDA and CUDA_COMPILER is NVCC) add -Xcompiler=opt:
128
- $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:NVIDIA>>:-Xcompiler=${cxx_option}>
129
- )
130
- endforeach()
131
-
132
- foreach (cxx_definition IN LISTS cxx_compile_definitions)
133
- # Add these for both CUDA and CXX targets:
134
- target_compile_definitions(thrust.compiler_interface INTERFACE
135
- ${cxx_definition}
136
- )
137
- endforeach()
138
-
139
- # Display warning numbers from nvcc cudafe errors:
140
- target_compile_options(thrust.compiler_interface INTERFACE
141
- # If using CUDA w/ NVCC...
142
- $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:NVIDIA>>:-Xcudafe=--display_error_number>
143
- )
144
-
145
- # This is kept separate for Github issue #1174.
146
- add_library(thrust.promote_cudafe_warnings INTERFACE)
147
- target_compile_options(thrust.promote_cudafe_warnings INTERFACE
148
- $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:NVIDIA>>:-Xcudafe=--promote_warnings>
149
- )
150
- endfunction()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/lama-example/models/ade20k/segm_lib/nn/__init__.py DELETED
@@ -1,2 +0,0 @@
1
- from .modules import *
2
- from .parallel import UserScatteredDataParallel, user_scattered_collate, async_copy_to
 
 
 
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/logger.py DELETED
@@ -1,93 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import functools
3
- import logging
4
- import os
5
- import sys
6
-
7
- from termcolor import colored
8
-
9
-
10
- class _ColorfulFormatter(logging.Formatter):
11
- def __init__(self, *args, **kwargs):
12
- self._root_name = kwargs.pop("root_name") + "."
13
- self._abbrev_name = kwargs.pop("abbrev_name", "")
14
- if len(self._abbrev_name):
15
- self._abbrev_name = self._abbrev_name + "."
16
- super(_ColorfulFormatter, self).__init__(*args, **kwargs)
17
-
18
- def formatMessage(self, record):
19
- record.name = record.name.replace(self._root_name, self._abbrev_name)
20
- log = super(_ColorfulFormatter, self).formatMessage(record)
21
- if record.levelno == logging.WARNING:
22
- prefix = colored("WARNING", "red", attrs=["blink"])
23
- elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
24
- prefix = colored("ERROR", "red", attrs=["blink", "underline"])
25
- else:
26
- return log
27
- return prefix + " " + log
28
-
29
-
30
- # so that calling setup_logger multiple times won't add many handlers
31
- @functools.lru_cache()
32
- def setup_logger(output=None, distributed_rank=0, *, color=True, name="imagenet", abbrev_name=None):
33
- """
34
- Initialize the logger and set its verbosity level to "DEBUG".
35
-
36
- Args:
37
- output (str): a file name or a directory to save log. If None, will not save log file.
38
- If ends with ".txt" or ".log", assumed to be a file name.
39
- Otherwise, logs will be saved to `output/log.txt`.
40
- name (str): the root module name of this logger
41
-
42
- Returns:
43
- logging.Logger: a logger
44
- """
45
- logger = logging.getLogger(name)
46
- logger.setLevel(logging.DEBUG)
47
- logger.propagate = False
48
-
49
- if abbrev_name is None:
50
- abbrev_name = name
51
-
52
- plain_formatter = logging.Formatter(
53
- "[%(asctime)s.%(msecs)03d]: %(message)s", datefmt="%m/%d %H:%M:%S"
54
- )
55
- # stdout logging: master only
56
- if distributed_rank == 0:
57
- ch = logging.StreamHandler(stream=sys.stdout)
58
- ch.setLevel(logging.DEBUG)
59
- if color:
60
- formatter = _ColorfulFormatter(
61
- colored("[%(asctime)s.%(msecs)03d]: ", "green") + "%(message)s",
62
- datefmt="%m/%d %H:%M:%S",
63
- root_name=name,
64
- abbrev_name=str(abbrev_name),
65
- )
66
- else:
67
- formatter = plain_formatter
68
- ch.setFormatter(formatter)
69
- logger.addHandler(ch)
70
-
71
- # file logging: all workers
72
- if output is not None:
73
- if output.endswith(".txt") or output.endswith(".log"):
74
- filename = output
75
- else:
76
- filename = os.path.join(output, "log.txt")
77
- if distributed_rank > 0:
78
- filename = filename + f".rank{distributed_rank}"
79
- os.makedirs(os.path.dirname(filename), exist_ok=True)
80
-
81
- fh = logging.StreamHandler(_cached_log_stream(filename))
82
- fh.setLevel(logging.DEBUG)
83
- fh.setFormatter(plain_formatter)
84
- logger.addHandler(fh)
85
-
86
- return logger
87
-
88
-
89
- # cache the opened file object, so that different calls to `setup_logger`
90
- # with the same file name can safely write to the same file.
91
- @functools.lru_cache(maxsize=None)
92
- def _cached_log_stream(filename):
93
- return open(filename, "a")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/__init__.py DELETED
File without changes
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/dpt_depth.py DELETED
@@ -1,109 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
-
5
- from .base_model import BaseModel
6
- from .blocks import (
7
- FeatureFusionBlock,
8
- FeatureFusionBlock_custom,
9
- Interpolate,
10
- _make_encoder,
11
- forward_vit,
12
- )
13
-
14
-
15
- def _make_fusion_block(features, use_bn):
16
- return FeatureFusionBlock_custom(
17
- features,
18
- nn.ReLU(False),
19
- deconv=False,
20
- bn=use_bn,
21
- expand=False,
22
- align_corners=True,
23
- )
24
-
25
-
26
- class DPT(BaseModel):
27
- def __init__(
28
- self,
29
- head,
30
- features=256,
31
- backbone="vitb_rn50_384",
32
- readout="project",
33
- channels_last=False,
34
- use_bn=False,
35
- ):
36
-
37
- super(DPT, self).__init__()
38
-
39
- self.channels_last = channels_last
40
-
41
- hooks = {
42
- "vitb_rn50_384": [0, 1, 8, 11],
43
- "vitb16_384": [2, 5, 8, 11],
44
- "vitl16_384": [5, 11, 17, 23],
45
- }
46
-
47
- # Instantiate backbone and reassemble blocks
48
- self.pretrained, self.scratch = _make_encoder(
49
- backbone,
50
- features,
51
- False, # Set to true if you want to train from scratch; uses ImageNet weights
52
- groups=1,
53
- expand=False,
54
- exportable=False,
55
- hooks=hooks[backbone],
56
- use_readout=readout,
57
- )
58
-
59
- self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
60
- self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
61
- self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
62
- self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
63
-
64
- self.scratch.output_conv = head
65
-
66
-
67
- def forward(self, x):
68
- if self.channels_last:
69
- x = x.contiguous(memory_format=torch.channels_last)  # contiguous() is not in-place; keep the returned tensor
70
-
71
- layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
72
-
73
- layer_1_rn = self.scratch.layer1_rn(layer_1)
74
- layer_2_rn = self.scratch.layer2_rn(layer_2)
75
- layer_3_rn = self.scratch.layer3_rn(layer_3)
76
- layer_4_rn = self.scratch.layer4_rn(layer_4)
77
-
78
- path_4 = self.scratch.refinenet4(layer_4_rn)
79
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
80
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
81
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
82
-
83
- out = self.scratch.output_conv(path_1)
84
-
85
- return out
86
-
87
-
88
- class DPTDepthModel(DPT):
89
- def __init__(self, path=None, non_negative=True, **kwargs):
90
- features = kwargs["features"] if "features" in kwargs else 256
91
-
92
- head = nn.Sequential(
93
- nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
94
- Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
95
- nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
96
- nn.ReLU(True),
97
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
98
- nn.ReLU(True) if non_negative else nn.Identity(),
99
- nn.Identity(),
100
- )
101
-
102
- super().__init__(head, **kwargs)
103
-
104
- if path is not None:
105
- self.load(path)
106
-
107
- def forward(self, x):
108
- return super().forward(x).squeeze(dim=1)
109
-
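
A minimal inference sketch for `DPTDepthModel` (a hedged illustration, not the upstream usage: the random input stands in for a preprocessed 384x384 RGB image, and instantiating the model this way requires the timm backbone weights to be downloadable):

import torch

model = DPTDepthModel(path=None, backbone="vitb_rn50_384", non_negative=True)
model.eval()
with torch.no_grad():
    x = torch.randn(1, 3, 384, 384)  # stand-in for a normalized RGB image batch
    depth = model(x)                 # [1, 384, 384] relative inverse-depth map
print(depth.shape)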
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/modeling_llama.py DELETED
@@ -1,755 +0,0 @@
1
- # This script is based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
2
-
3
- """ PyTorch LLaMA model."""
4
- import math
5
- from typing import List, Optional, Tuple, Union
6
-
7
- import torch
8
- import torch.utils.checkpoint
9
- from torch import nn
10
- from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
11
-
12
- from transformers.activations import ACT2FN
13
- from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
14
- from transformers.modeling_utils import PreTrainedModel
15
- from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
16
- from transformers.models.llama.configuration_llama import LlamaConfig
17
-
18
-
19
- logger = logging.get_logger(__name__)
20
-
21
- _CONFIG_FOR_DOC = "LlamaConfig"
22
-
23
-
24
- # Copied from transformers.models.bart.modeling_bart._make_causal_mask
25
- def _make_causal_mask(
26
- input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
27
- ):
28
- """
29
- Make causal mask used for bi-directional self-attention.
30
- """
31
- bsz, tgt_len = input_ids_shape
32
- mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
33
- mask_cond = torch.arange(mask.size(-1), device=device)
34
- mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
35
- mask = mask.to(dtype)
36
-
37
- if past_key_values_length > 0:
38
- mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
39
- return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
40
-
41
-
42
- # Copied from transformers.models.bart.modeling_bart._expand_mask
43
- def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
44
- """
45
- Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
46
- """
47
- bsz, src_len = mask.size()
48
- tgt_len = tgt_len if tgt_len is not None else src_len
49
-
50
- expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
51
-
52
- inverted_mask = 1.0 - expanded_mask
53
-
54
- return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
55
-
56
-
57
- class LlamaRMSNorm(nn.Module):
58
- def __init__(self, hidden_size, eps=1e-6):
59
- """
60
- LlamaRMSNorm is equivalent to T5LayerNorm
61
- """
62
- super().__init__()
63
- self.weight = nn.Parameter(torch.ones(hidden_size))
64
- self.variance_epsilon = eps
65
-
66
- def forward(self, hidden_states):
67
- variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
68
- hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
69
-
70
- # convert into half-precision if necessary
71
- if self.weight.dtype in [torch.float16, torch.bfloat16]:
72
- hidden_states = hidden_states.to(self.weight.dtype)
73
-
74
- return self.weight * hidden_states
75
-
76
-
77
- class LlamaRotaryEmbedding(torch.nn.Module):
78
- def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
79
- super().__init__()
80
- inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
81
- self.register_buffer("inv_freq", inv_freq)
82
-
83
- # Build here to make `torch.jit.trace` work.
84
- self.max_seq_len_cached = max_position_embeddings
85
- t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
86
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
87
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
88
- emb = torch.cat((freqs, freqs), dim=-1)
89
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
90
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
91
-
92
- def forward(self, x, seq_len=None):
93
- # x: [bs, num_attention_heads, seq_len, head_size]
94
- # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
95
- if seq_len > self.max_seq_len_cached:
96
- self.max_seq_len_cached = seq_len
97
- t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
98
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
99
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
100
- emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
101
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
102
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
103
- return (
104
- self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
105
- self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
106
- )
107
-
108
-
109
- def rotate_half(x):
110
- """Rotates half the hidden dims of the input."""
111
- x1 = x[..., : x.shape[-1] // 2]
112
- x2 = x[..., x.shape[-1] // 2 :]
113
- return torch.cat((-x2, x1), dim=-1)
114
-
115
-
116
- def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
117
- gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
118
- gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
119
- cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
120
- sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
121
- q_embed = (q * cos) + (rotate_half(q) * sin)
122
- k_embed = (k * cos) + (rotate_half(k) * sin)
123
- return q_embed, k_embed
124
-
125
-
126
- class LlamaMLP(nn.Module):
127
- def __init__(
128
- self,
129
- hidden_size: int,
130
- intermediate_size: int,
131
- hidden_act: str,
132
- ):
133
- super().__init__()
134
- self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
135
- self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
136
- self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
137
- self.act_fn = ACT2FN[hidden_act]
138
-
139
- def forward(self, x):
140
- return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
141
-
142
-
143
- class LlamaAttention(nn.Module):
144
- """Multi-headed attention from 'Attention Is All You Need' paper"""
145
-
146
- def __init__(self, config: LlamaConfig):
147
- super().__init__()
148
- self.config = config
149
- self.hidden_size = config.hidden_size
150
- self.num_heads = config.num_attention_heads
151
- self.head_dim = self.hidden_size // self.num_heads
152
- self.max_position_embeddings = config.max_position_embeddings
153
-
154
- if (self.head_dim * self.num_heads) != self.hidden_size:
155
- raise ValueError(
156
- f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
157
- f" and `num_heads`: {self.num_heads})."
158
- )
159
- self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
160
- self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
161
- self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
162
- self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
163
- self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
164
-
165
- def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
166
- return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
167
-
168
- def forward(
169
- self,
170
- hidden_states: torch.Tensor,
171
- attention_mask: Optional[torch.Tensor] = None,
172
- position_ids: Optional[torch.LongTensor] = None,
173
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
174
- output_attentions: bool = False,
175
- use_cache: bool = False,
176
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
177
- bsz, q_len, _ = hidden_states.size()
178
-
179
- query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
180
- key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
181
- value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
182
-
183
- kv_seq_len = key_states.shape[-2]
184
- if past_key_value is not None:
185
- kv_seq_len += past_key_value[0].shape[-2]
186
- cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
187
- query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
188
- # [bsz, nh, t, hd]
189
-
190
- if past_key_value is not None:
191
- # reuse k, v, self_attention
192
- key_states = torch.cat([past_key_value[0], key_states], dim=2)
193
- value_states = torch.cat([past_key_value[1], value_states], dim=2)
194
-
195
- past_key_value = (key_states, value_states) if use_cache else None
196
-
197
- attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
198
-
199
- if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
200
- raise ValueError(
201
- f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
202
- f" {attn_weights.size()}"
203
- )
204
-
205
- if attention_mask is not None:
206
- if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
207
- raise ValueError(
208
- f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
209
- )
210
- attn_weights = attn_weights + attention_mask
211
- attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
212
-
213
- # upcast attention to fp32
214
- attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
215
- attn_output = torch.matmul(attn_weights, value_states)
216
-
217
- if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
218
- raise ValueError(
219
- f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
220
- f" {attn_output.size()}"
221
- )
222
-
223
- attn_output = attn_output.transpose(1, 2)
224
- attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
225
-
226
- attn_output = self.o_proj(attn_output)
227
-
228
- if not output_attentions:
229
- attn_weights = None
230
-
231
- return attn_output, attn_weights, past_key_value
232
-
233
-
234
- class LlamaDecoderLayer(nn.Module):
235
- def __init__(self, config: LlamaConfig):
236
- super().__init__()
237
- self.hidden_size = config.hidden_size
238
- self.self_attn = LlamaAttention(config=config)
239
- self.mlp = LlamaMLP(
240
- hidden_size=self.hidden_size,
241
- intermediate_size=config.intermediate_size,
242
- hidden_act=config.hidden_act,
243
- )
244
- self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
245
- self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
246
-
247
- def forward(
248
- self,
249
- hidden_states: torch.Tensor,
250
- attention_mask: Optional[torch.Tensor] = None,
251
- position_ids: Optional[torch.LongTensor] = None,
252
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
253
- output_attentions: Optional[bool] = False,
254
- use_cache: Optional[bool] = False,
255
- ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
256
- """
257
- Args:
258
- hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
259
- attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
260
- `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
261
- output_attentions (`bool`, *optional*):
262
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under
263
- returned tensors for more detail.
264
- use_cache (`bool`, *optional*):
265
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
266
- (see `past_key_values`).
267
- past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
268
- """
269
-
270
- residual = hidden_states
271
-
272
- hidden_states = self.input_layernorm(hidden_states)
273
-
274
- # Self Attention
275
- hidden_states, self_attn_weights, present_key_value = self.self_attn(
276
- hidden_states=hidden_states,
277
- attention_mask=attention_mask,
278
- position_ids=position_ids,
279
- past_key_value=past_key_value,
280
- output_attentions=output_attentions,
281
- use_cache=use_cache,
282
- )
283
- hidden_states = residual + hidden_states
284
-
285
- # Fully Connected
286
- residual = hidden_states
287
- hidden_states = self.post_attention_layernorm(hidden_states)
288
- hidden_states = self.mlp(hidden_states)
289
- hidden_states = residual + hidden_states
290
-
291
- outputs = (hidden_states,)
292
-
293
- if output_attentions:
294
- outputs += (self_attn_weights,)
295
-
296
- if use_cache:
297
- outputs += (present_key_value,)
298
-
299
- return outputs
300
-
301
-
302
- LLAMA_START_DOCSTRING = r"""
303
- This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
304
- library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
305
- etc.)
306
-
307
- This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
308
- Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
309
- and behavior.
310
-
311
- Parameters:
312
- config ([`LlamaConfig`]):
313
- Model configuration class with all the parameters of the model. Initializing with a config file does not
314
- load the weights associated with the model, only the configuration. Check out the
315
- [`~PreTrainedModel.from_pretrained`] method to load the model weights.
316
- """
317
-
318
-
319
- @add_start_docstrings(
320
- "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
321
- LLAMA_START_DOCSTRING,
322
- )
323
- class LlamaPreTrainedModel(PreTrainedModel):
324
- config_class = LlamaConfig
325
- base_model_prefix = "model"
326
- supports_gradient_checkpointing = True
327
- _no_split_modules = ["LlamaDecoderLayer"]
328
- _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
329
-
330
- def _init_weights(self, module):
331
- std = self.config.initializer_range
332
- if isinstance(module, nn.Linear):
333
- module.weight.data.normal_(mean=0.0, std=std)
334
- if module.bias is not None:
335
- module.bias.data.zero_()
336
- elif isinstance(module, nn.Embedding):
337
- module.weight.data.normal_(mean=0.0, std=std)
338
- if module.padding_idx is not None:
339
- module.weight.data[module.padding_idx].zero_()
340
-
341
- def _set_gradient_checkpointing(self, module, value=False):
342
- if isinstance(module, LlamaModel):
343
- module.gradient_checkpointing = value
344
-
345
-
346
- LLAMA_INPUTS_DOCSTRING = r"""
347
- Args:
348
- input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
349
- Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
350
- it.
351
-
352
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
353
- [`PreTrainedTokenizer.__call__`] for details.
354
-
355
- [What are input IDs?](../glossary#input-ids)
356
- attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
357
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
358
-
359
- - 1 for tokens that are **not masked**,
360
- - 0 for tokens that are **masked**.
361
-
362
- [What are attention masks?](../glossary#attention-mask)
363
-
364
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
365
- [`PreTrainedTokenizer.__call__`] for details.
366
-
367
- If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
368
- `past_key_values`).
369
-
370
- If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
371
- and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
372
- information on the default strategy.
373
-
374
- - 1 indicates the head is **not masked**,
375
- - 0 indicates the head is **masked**.
376
- position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
377
- Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
378
- config.n_positions - 1]`.
379
-
380
- [What are position IDs?](../glossary#position-ids)
381
- past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
382
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
383
- `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
384
- `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
385
-
386
- Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
387
- blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
388
-
389
- If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
390
- don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
391
- `decoder_input_ids` of shape `(batch_size, sequence_length)`.
392
- inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
393
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
394
- is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
395
- model's internal embedding lookup matrix.
396
- use_cache (`bool`, *optional*):
397
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
398
- `past_key_values`).
399
- output_attentions (`bool`, *optional*):
400
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
401
- tensors for more detail.
402
- output_hidden_states (`bool`, *optional*):
403
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
404
- more detail.
405
- return_dict (`bool`, *optional*):
406
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
407
- """
408
-
409
-
410
- @add_start_docstrings(
411
- "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
412
- LLAMA_START_DOCSTRING,
413
- )
414
- class LlamaModel(LlamaPreTrainedModel):
415
- """
416
- Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
417
-
418
- Args:
419
- config: LlamaConfig
420
- """
421
-
422
- def __init__(self, config: LlamaConfig):
423
- super().__init__(config)
424
- self.padding_idx = config.pad_token_id
425
- self.vocab_size = config.vocab_size
426
-
427
- self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
428
- self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
429
- self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
430
-
431
- self.gradient_checkpointing = False
432
- # Initialize weights and apply final processing
433
- self.post_init()
434
-
435
- def get_input_embeddings(self):
436
- return self.embed_tokens
437
-
438
- def set_input_embeddings(self, value):
439
- self.embed_tokens = value
440
-
441
- # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
442
- def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
443
- # create causal mask
444
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
445
- combined_attention_mask = None
446
- if input_shape[-1] > 1:
447
- combined_attention_mask = _make_causal_mask(
448
- input_shape,
449
- inputs_embeds.dtype,
450
- device=inputs_embeds.device,
451
- past_key_values_length=past_key_values_length,
452
- )
453
-
454
- if attention_mask is not None:
455
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
456
- expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
457
- inputs_embeds.device
458
- )
459
- combined_attention_mask = (
460
- expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
461
- )
462
-
463
- return combined_attention_mask
464
-
465
- @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
466
- def forward(
467
- self,
468
- input_ids: torch.LongTensor = None,
469
- attention_mask: Optional[torch.Tensor] = None,
470
- position_ids: Optional[torch.LongTensor] = None,
471
- past_key_values: Optional[List[torch.FloatTensor]] = None,
472
- inputs_embeds: Optional[torch.FloatTensor] = None,
473
- query_embeds: Optional[torch.FloatTensor] = None,
474
- use_cache: Optional[bool] = None,
475
- output_attentions: Optional[bool] = None,
476
- output_hidden_states: Optional[bool] = None,
477
- return_dict: Optional[bool] = None,
478
- ) -> Union[Tuple, BaseModelOutputWithPast]:
479
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
480
- output_hidden_states = (
481
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
482
- )
483
- use_cache = use_cache if use_cache is not None else self.config.use_cache
484
-
485
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
486
-
487
- # retrieve input_ids and inputs_embeds
488
- if input_ids is not None and inputs_embeds is not None:
489
- raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
490
- elif input_ids is not None:
491
- batch_size, seq_length = input_ids.shape
492
- elif inputs_embeds is not None:
493
- batch_size, seq_length, _ = inputs_embeds.shape
494
- else:
495
- raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
496
-
497
- if inputs_embeds is None:
498
- inputs_embeds = self.embed_tokens(input_ids)
499
- if query_embeds is not None:
500
- inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1)
501
- batch_size, seq_length, _ = inputs_embeds.shape
502
-
503
- seq_length_with_past = seq_length
504
- past_key_values_length = 0
505
-
506
- if past_key_values is not None:
507
- past_key_values_length = past_key_values[0][0].shape[2]
508
- seq_length_with_past = seq_length_with_past + past_key_values_length
509
-
510
- if position_ids is None:
511
- device = input_ids.device if input_ids is not None else inputs_embeds.device
512
- position_ids = torch.arange(
513
- past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
514
- )
515
- position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
516
- else:
517
- position_ids = position_ids.view(-1, seq_length).long()
518
-
519
- # embed positions
520
- if attention_mask is None:
521
- attention_mask = torch.ones(
522
- (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
523
- )
524
- attention_mask = self._prepare_decoder_attention_mask(
525
- attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
526
- )
527
-
528
- hidden_states = inputs_embeds
529
-
530
- if self.gradient_checkpointing and self.training:
531
- if use_cache:
532
- logger.warning_once(
533
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
534
- )
535
- use_cache = False
536
-
537
- # decoder layers
538
- all_hidden_states = () if output_hidden_states else None
539
- all_self_attns = () if output_attentions else None
540
- next_decoder_cache = () if use_cache else None
541
-
542
- for idx, decoder_layer in enumerate(self.layers):
543
- if output_hidden_states:
544
- all_hidden_states += (hidden_states,)
545
-
546
- past_key_value = past_key_values[idx] if past_key_values is not None else None
547
-
548
- if self.gradient_checkpointing and self.training:
549
-
550
- def create_custom_forward(module):
551
- def custom_forward(*inputs):
552
- # None for past_key_value
553
- return module(*inputs, output_attentions, None)
554
-
555
- return custom_forward
556
-
557
- layer_outputs = torch.utils.checkpoint.checkpoint(
558
- create_custom_forward(decoder_layer),
-                    hidden_states,
-                    attention_mask,
-                    position_ids,
-                    None,
-                )
-            else:
-                layer_outputs = decoder_layer(
-                    hidden_states,
-                    attention_mask=attention_mask,
-                    position_ids=position_ids,
-                    past_key_value=past_key_value,
-                    output_attentions=output_attentions,
-                    use_cache=use_cache,
-                )
-
-            hidden_states = layer_outputs[0]
-
-            if use_cache:
-                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
-
-            if output_attentions:
-                all_self_attns += (layer_outputs[1],)
-
-        hidden_states = self.norm(hidden_states)
-
-        # add hidden states from the last decoder layer
-        if output_hidden_states:
-            all_hidden_states += (hidden_states,)
-
-        next_cache = next_decoder_cache if use_cache else None
-        if not return_dict:
-            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
-        return BaseModelOutputWithPast(
-            last_hidden_state=hidden_states,
-            past_key_values=next_cache,
-            hidden_states=all_hidden_states,
-            attentions=all_self_attns,
-        )
-
-
-class LlamaForCausalLM(LlamaPreTrainedModel):
-    def __init__(self, config):
-        super().__init__(config)
-        self.model = LlamaModel(config)
-
-        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
-        # Initialize weights and apply final processing
-        self.post_init()
-
-    def get_input_embeddings(self):
-        return self.model.embed_tokens
-
-    def set_input_embeddings(self, value):
-        self.model.embed_tokens = value
-
-    def get_output_embeddings(self):
-        return self.lm_head
-
-    def set_output_embeddings(self, new_embeddings):
-        self.lm_head = new_embeddings
-
-    def set_decoder(self, decoder):
-        self.model = decoder
-
-    def get_decoder(self):
-        return self.model
-
-    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
-    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
-    def forward(
-        self,
-        input_ids: torch.LongTensor = None,
-        attention_mask: Optional[torch.Tensor] = None,
-        position_ids: Optional[torch.LongTensor] = None,
-        past_key_values: Optional[List[torch.FloatTensor]] = None,
-        inputs_embeds: Optional[torch.FloatTensor] = None,
-        query_embeds: Optional[torch.FloatTensor] = None,
-        labels: Optional[torch.LongTensor] = None,
-        use_cache: Optional[bool] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-    ) -> Union[Tuple, CausalLMOutputWithPast]:
-        r"""
-        Args:
-            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
-                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
-                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
-
-        Returns:
-
-        Example:
-
-        ```python
-        >>> from transformers import AutoTokenizer, LlamaForCausalLM
-
-        >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
-        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
-
-        >>> prompt = "Hey, are you conscious? Can you talk to me?"
-        >>> inputs = tokenizer(prompt, return_tensors="pt")
-
-        >>> # Generate
-        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
-        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
-        ```"""
-
-        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-        output_hidden_states = (
-            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-        )
-        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
-        outputs = self.model(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            position_ids=position_ids,
-            past_key_values=past_key_values,
-            inputs_embeds=inputs_embeds,
-            query_embeds=query_embeds,
-            use_cache=use_cache,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-        )
-
-        hidden_states = outputs[0]
-        logits = self.lm_head(hidden_states)
-
-        loss = None
-        if labels is not None:
-            # Shift so that tokens < n predict n
-            shift_logits = logits[..., :-1, :].contiguous()
-            shift_labels = labels[..., 1:].contiguous()
-            # Flatten the tokens
-            loss_fct = CrossEntropyLoss()
-            shift_logits = shift_logits.view(-1, self.config.vocab_size)
-            shift_labels = shift_labels.view(-1)
-            # Enable model parallelism
-            shift_labels = shift_labels.to(shift_logits.device)
-            loss = loss_fct(shift_logits, shift_labels)
-
-        if not return_dict:
-            output = (logits,) + outputs[1:]
-            return (loss,) + output if loss is not None else output
-
-        return CausalLMOutputWithPast(
-            loss=loss,
-            logits=logits,
-            past_key_values=outputs.past_key_values,
-            hidden_states=outputs.hidden_states,
-            attentions=outputs.attentions,
-        )
-
-    def prepare_inputs_for_generation(
-        self, input_ids, query_embeds=None, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
-    ):
-        if past_key_values:
-            input_ids = input_ids[:, -1:]
-
-        position_ids = kwargs.get("position_ids", None)
-        if attention_mask is not None and position_ids is None:
-            # create position_ids on the fly for batch generation
-            position_ids = attention_mask.long().cumsum(-1) - 1
-            position_ids.masked_fill_(attention_mask == 0, 1)
-            if past_key_values:
-                position_ids = position_ids[:, -1].unsqueeze(-1)
-                query_embeds = None
-
-        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
-        if inputs_embeds is not None and past_key_values is None:
-            model_inputs = {"inputs_embeds": inputs_embeds}
-        else:
-            model_inputs = {"input_ids": input_ids}
-
-        model_inputs.update(
-            {
-                "position_ids": position_ids,
-                "query_embeds": query_embeds,
-                "past_key_values": past_key_values,
-                "use_cache": kwargs.get("use_cache"),
-                "attention_mask": attention_mask,
-            }
-        )
-        return model_inputs
-
-    @staticmethod
-    def _reorder_cache(past_key_values, beam_idx):
-        reordered_past = ()
-        for layer_past in past_key_values:
-            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
-        return reordered_past
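One detail worth pulling out of the deleted `forward` above is the causal-LM loss: logits are shifted left and labels right so that position n is predicted from positions < n, and labels set to -100 are ignored. Below is a self-contained sketch of that exact computation outside the model; the tensor sizes are made up for illustration and only PyTorch is assumed:

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, vocab_size = 2, 5, 11           # hypothetical sizes
logits = torch.randn(batch_size, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, seq_len))
labels[:, 0] = -100                                  # e.g. mask a prompt token

# Shift so that tokens < n predict n, exactly as in LlamaForCausalLM.forward
shift_logits = logits[..., :-1, :].contiguous()      # drop the last position
shift_labels = labels[..., 1:].contiguous()          # drop the first label

loss_fct = CrossEntropyLoss()                        # ignore_index=-100 by default
loss = loss_fct(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.item())
```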
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/utils/__init__.py DELETED
@@ -1,30 +0,0 @@
-from .core import (
-    infer_vegalite_type,
-    infer_encoding_types,
-    sanitize_dataframe,
-    parse_shorthand,
-    use_signature,
-    update_nested,
-    display_traceback,
-    SchemaBase,
-)
-from .html import spec_to_html
-from .plugin_registry import PluginRegistry
-from .deprecation import AltairDeprecationWarning
-from .schemapi import Undefined
-
-
-__all__ = (
-    "infer_vegalite_type",
-    "infer_encoding_types",
-    "sanitize_dataframe",
-    "spec_to_html",
-    "parse_shorthand",
-    "use_signature",
-    "update_nested",
-    "display_traceback",
-    "AltairDeprecationWarning",
-    "SchemaBase",
-    "Undefined",
-    "PluginRegistry",
-)
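This module is a pure re-export layer, so every name in `__all__` above was importable directly from `altair.utils`. A small sketch of two of those helpers (assuming altair and pandas are installed; the printed type is illustrative):

```python
import pandas as pd
from altair.utils import infer_vegalite_type, sanitize_dataframe

df = pd.DataFrame({"x": [1, 2, 3], "label": ["a", "b", "c"]})
clean = sanitize_dataframe(df)            # coerce dtypes to JSON-serializable values
print(infer_vegalite_type(clean["x"]))    # expected: "quantitative" for numeric data
```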
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/dependencies/__init__.py DELETED
File without changes
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/otlLib/optimize/gpos.py DELETED
@@ -1,452 +0,0 @@
-import logging
-import os
-from collections import defaultdict, namedtuple
-from functools import reduce
-from itertools import chain
-from math import log2
-from typing import DefaultDict, Dict, Iterable, List, Sequence, Tuple
-
-from fontTools.config import OPTIONS
-from fontTools.misc.intTools import bit_count, bit_indices
-from fontTools.ttLib import TTFont
-from fontTools.ttLib.tables import otBase, otTables
-
-log = logging.getLogger(__name__)
-
-COMPRESSION_LEVEL = OPTIONS[f"{__name__}:COMPRESSION_LEVEL"]
-
-# Kept because ufo2ft depends on it, to be removed once ufo2ft uses the config instead
-# https://github.com/fonttools/fonttools/issues/2592
-GPOS_COMPACT_MODE_ENV_KEY = "FONTTOOLS_GPOS_COMPACT_MODE"
-GPOS_COMPACT_MODE_DEFAULT = str(COMPRESSION_LEVEL.default)
-
-
-def _compression_level_from_env() -> int:
-    env_level = GPOS_COMPACT_MODE_DEFAULT
-    if GPOS_COMPACT_MODE_ENV_KEY in os.environ:
-        import warnings
-
-        warnings.warn(
-            f"'{GPOS_COMPACT_MODE_ENV_KEY}' environment variable is deprecated. "
-            "Please set the 'fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL' option "
-            "in TTFont.cfg.",
-            DeprecationWarning,
-        )
-
-        env_level = os.environ[GPOS_COMPACT_MODE_ENV_KEY]
-    if len(env_level) == 1 and env_level in "0123456789":
-        return int(env_level)
-    raise ValueError(f"Bad {GPOS_COMPACT_MODE_ENV_KEY}={env_level}")
-
-
-def compact(font: TTFont, level: int) -> TTFont:
-    # Ideal plan:
-    # 1. Find lookups of Lookup Type 2: Pair Adjustment Positioning Subtable
-    #    https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable
-    # 2. Extract glyph-glyph kerning and class-kerning from all present subtables
-    # 3. Regroup into different subtable arrangements
-    # 4. Put back into the lookup
-    #
-    # Actual implementation:
-    # 2. Only class kerning is optimized currently
-    # 3. If the input kerning is already in several subtables, the subtables
-    #    are not grouped together first; instead each subtable is treated
-    #    independently, so currently this step is:
-    #    Split existing subtables into more smaller subtables
-    gpos = font["GPOS"]
-    for lookup in gpos.table.LookupList.Lookup:
-        if lookup.LookupType == 2:
-            compact_lookup(font, level, lookup)
-        elif lookup.LookupType == 9 and lookup.SubTable[0].ExtensionLookupType == 2:
-            compact_ext_lookup(font, level, lookup)
-    return font
-
-
-def compact_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None:
-    new_subtables = compact_pair_pos(font, level, lookup.SubTable)
-    lookup.SubTable = new_subtables
-    lookup.SubTableCount = len(new_subtables)
-
-
-def compact_ext_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None:
-    new_subtables = compact_pair_pos(
-        font, level, [ext_subtable.ExtSubTable for ext_subtable in lookup.SubTable]
-    )
-    new_ext_subtables = []
-    for subtable in new_subtables:
-        ext_subtable = otTables.ExtensionPos()
-        ext_subtable.Format = 1
-        ext_subtable.ExtSubTable = subtable
-        new_ext_subtables.append(ext_subtable)
-    lookup.SubTable = new_ext_subtables
-    lookup.SubTableCount = len(new_ext_subtables)
-
-
-def compact_pair_pos(
-    font: TTFont, level: int, subtables: Sequence[otTables.PairPos]
-) -> Sequence[otTables.PairPos]:
-    new_subtables = []
-    for subtable in subtables:
-        if subtable.Format == 1:
-            # Not doing anything to Format 1 (yet?)
-            new_subtables.append(subtable)
-        elif subtable.Format == 2:
-            new_subtables.extend(compact_class_pairs(font, level, subtable))
-    return new_subtables
-
-
-def compact_class_pairs(
-    font: TTFont, level: int, subtable: otTables.PairPos
-) -> List[otTables.PairPos]:
-    from fontTools.otlLib.builder import buildPairPosClassesSubtable
-
-    subtables = []
-    classes1: DefaultDict[int, List[str]] = defaultdict(list)
-    for g in subtable.Coverage.glyphs:
-        classes1[subtable.ClassDef1.classDefs.get(g, 0)].append(g)
-    classes2: DefaultDict[int, List[str]] = defaultdict(list)
-    for g, i in subtable.ClassDef2.classDefs.items():
-        classes2[i].append(g)
-    all_pairs = {}
-    for i, class1 in enumerate(subtable.Class1Record):
-        for j, class2 in enumerate(class1.Class2Record):
-            if is_really_zero(class2):
-                continue
-            all_pairs[(tuple(sorted(classes1[i])), tuple(sorted(classes2[j])))] = (
-                getattr(class2, "Value1", None),
-                getattr(class2, "Value2", None),
-            )
-    grouped_pairs = cluster_pairs_by_class2_coverage_custom_cost(font, all_pairs, level)
-    for pairs in grouped_pairs:
-        subtables.append(buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap()))
-    return subtables
-
-
-def is_really_zero(class2: otTables.Class2Record) -> bool:
-    v1 = getattr(class2, "Value1", None)
-    v2 = getattr(class2, "Value2", None)
-    return (v1 is None or v1.getEffectiveFormat() == 0) and (
-        v2 is None or v2.getEffectiveFormat() == 0
-    )
-
-
-Pairs = Dict[
-    Tuple[Tuple[str, ...], Tuple[str, ...]],
-    Tuple[otBase.ValueRecord, otBase.ValueRecord],
-]
-
-# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L935-L958
-def _getClassRanges(glyphIDs: Iterable[int]):
-    glyphIDs = sorted(glyphIDs)
-    last = glyphIDs[0]
-    ranges = [[last]]
-    for glyphID in glyphIDs[1:]:
-        if glyphID != last + 1:
-            ranges[-1].append(last)
-            ranges.append([glyphID])
-        last = glyphID
-    ranges[-1].append(last)
-    return ranges, glyphIDs[0], glyphIDs[-1]
-
-
-# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L960-L989
-def _classDef_bytes(
-    class_data: List[Tuple[List[Tuple[int, int]], int, int]],
-    class_ids: List[int],
-    coverage=False,
-):
-    if not class_ids:
-        return 0
-    first_ranges, min_glyph_id, max_glyph_id = class_data[class_ids[0]]
-    range_count = len(first_ranges)
-    for i in class_ids[1:]:
-        data = class_data[i]
-        range_count += len(data[0])
-        min_glyph_id = min(min_glyph_id, data[1])
-        max_glyph_id = max(max_glyph_id, data[2])
-    glyphCount = max_glyph_id - min_glyph_id + 1
-    # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-1
-    format1_bytes = 6 + glyphCount * 2
-    # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-2
-    format2_bytes = 4 + range_count * 6
-    return min(format1_bytes, format2_bytes)
-
-
-ClusteringContext = namedtuple(
-    "ClusteringContext",
-    [
-        "lines",
-        "all_class1",
-        "all_class1_data",
-        "all_class2_data",
-        "valueFormat1_bytes",
-        "valueFormat2_bytes",
-    ],
-)
-
-
-class Cluster:
-    # TODO(Python 3.7): Turn this into a dataclass
-    # ctx: ClusteringContext
-    # indices: int
-    # Caches
-    # TODO(Python 3.8): use functools.cached_property instead of the
-    # manually cached properties, and remove the cache fields listed below.
-    # _indices: Optional[List[int]] = None
-    # _column_indices: Optional[List[int]] = None
-    # _cost: Optional[int] = None
-
-    __slots__ = "ctx", "indices_bitmask", "_indices", "_column_indices", "_cost"
-
-    def __init__(self, ctx: ClusteringContext, indices_bitmask: int):
-        self.ctx = ctx
-        self.indices_bitmask = indices_bitmask
-        self._indices = None
-        self._column_indices = None
-        self._cost = None
-
-    @property
-    def indices(self):
-        if self._indices is None:
-            self._indices = bit_indices(self.indices_bitmask)
-        return self._indices
-
-    @property
-    def column_indices(self):
-        if self._column_indices is None:
-            # Indices of columns that have a 1 in at least 1 line
-            # => binary OR all the lines
-            bitmask = reduce(int.__or__, (self.ctx.lines[i] for i in self.indices))
-            self._column_indices = bit_indices(bitmask)
-        return self._column_indices
-
-    @property
-    def width(self):
-        # Add 1 because Class2=0 cannot be used but needs to be encoded.
-        return len(self.column_indices) + 1
-
-    @property
-    def cost(self):
-        if self._cost is None:
-            self._cost = (
-                # 2 bytes to store the offset to this subtable in the Lookup table above
-                2
-                # Contents of the subtable
-                # From: https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#pair-adjustment-positioning-format-2-class-pair-adjustment
-                # uint16 posFormat Format identifier: format = 2
-                + 2
-                # Offset16 coverageOffset Offset to Coverage table, from beginning of PairPos subtable.
-                + 2
-                + self.coverage_bytes
-                # uint16 valueFormat1 ValueRecord definition — for the first glyph of the pair (may be zero).
-                + 2
-                # uint16 valueFormat2 ValueRecord definition — for the second glyph of the pair (may be zero).
-                + 2
-                # Offset16 classDef1Offset Offset to ClassDef table, from beginning of PairPos subtable — for the first glyph of the pair.
-                + 2
-                + self.classDef1_bytes
-                # Offset16 classDef2Offset Offset to ClassDef table, from beginning of PairPos subtable — for the second glyph of the pair.
-                + 2
-                + self.classDef2_bytes
-                # uint16 class1Count Number of classes in classDef1 table — includes Class 0.
-                + 2
-                # uint16 class2Count Number of classes in classDef2 table — includes Class 0.
-                + 2
-                # Class1Record class1Records[class1Count] Array of Class1 records, ordered by classes in classDef1.
-                + (self.ctx.valueFormat1_bytes + self.ctx.valueFormat2_bytes)
-                * len(self.indices)
-                * self.width
-            )
-        return self._cost
-
-    @property
-    def coverage_bytes(self):
-        format1_bytes = (
-            # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-1
-            # uint16 coverageFormat Format identifier — format = 1
-            # uint16 glyphCount Number of glyphs in the glyph array
-            4
-            # uint16 glyphArray[glyphCount] Array of glyph IDs — in numerical order
-            + sum(len(self.ctx.all_class1[i]) for i in self.indices) * 2
-        )
-        ranges = sorted(
-            chain.from_iterable(self.ctx.all_class1_data[i][0] for i in self.indices)
-        )
-        merged_range_count = 0
-        last = None
-        for (start, end) in ranges:
-            if last is not None and start != last + 1:
-                merged_range_count += 1
-            last = end
-        format2_bytes = (
-            # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-2
-            # uint16 coverageFormat Format identifier — format = 2
-            # uint16 rangeCount Number of RangeRecords
-            4
-            # RangeRecord rangeRecords[rangeCount] Array of glyph ranges — ordered by startGlyphID.
-            # uint16 startGlyphID First glyph ID in the range
-            # uint16 endGlyphID Last glyph ID in the range
-            # uint16 startCoverageIndex Coverage Index of first glyph ID in range
-            + merged_range_count * 6
-        )
-        return min(format1_bytes, format2_bytes)
-
-    @property
-    def classDef1_bytes(self):
-        # We can skip encoding one of the Class1 definitions, and use
-        # Class1=0 to represent it instead, because Class1 is gated by the
-        # Coverage definition. Use Class1=0 for the highest byte savings.
-        # Going through all options takes too long, pick the biggest class
-        # = what happens in otlLib.builder.ClassDefBuilder.classes()
-        biggest_index = max(self.indices, key=lambda i: len(self.ctx.all_class1[i]))
-        return _classDef_bytes(
-            self.ctx.all_class1_data, [i for i in self.indices if i != biggest_index]
-        )
-
-    @property
-    def classDef2_bytes(self):
-        # All Class2 need to be encoded because we can't use Class2=0
-        return _classDef_bytes(self.ctx.all_class2_data, self.column_indices)
-
-
-def cluster_pairs_by_class2_coverage_custom_cost(
-    font: TTFont,
-    pairs: Pairs,
-    compression: int = 5,
-) -> List[Pairs]:
-    if not pairs:
-        # The subtable was actually empty?
-        return [pairs]
-
-    # Sorted for reproducibility/determinism
-    all_class1 = sorted(set(pair[0] for pair in pairs))
-    all_class2 = sorted(set(pair[1] for pair in pairs))
-
-    # Use Python's big ints for binary vectors representing each line
-    lines = [
-        sum(
-            1 << i if (class1, class2) in pairs else 0
-            for i, class2 in enumerate(all_class2)
-        )
-        for class1 in all_class1
-    ]
-
-    # Map glyph names to ids and work with ints throughout for ClassDef formats
-    name_to_id = font.getReverseGlyphMap()
-    # Each entry in the arrays below is (range_count, min_glyph_id, max_glyph_id)
-    all_class1_data = [
-        _getClassRanges(name_to_id[name] for name in cls) for cls in all_class1
-    ]
-    all_class2_data = [
-        _getClassRanges(name_to_id[name] for name in cls) for cls in all_class2
-    ]
-
-    format1 = 0
-    format2 = 0
-    for pair, value in pairs.items():
-        format1 |= value[0].getEffectiveFormat() if value[0] else 0
-        format2 |= value[1].getEffectiveFormat() if value[1] else 0
-    valueFormat1_bytes = bit_count(format1) * 2
-    valueFormat2_bytes = bit_count(format2) * 2
-
-    ctx = ClusteringContext(
-        lines,
-        all_class1,
-        all_class1_data,
-        all_class2_data,
-        valueFormat1_bytes,
-        valueFormat2_bytes,
-    )
-
-    cluster_cache: Dict[int, Cluster] = {}
-
-    def make_cluster(indices: int) -> Cluster:
-        cluster = cluster_cache.get(indices, None)
-        if cluster is not None:
-            return cluster
-        cluster = Cluster(ctx, indices)
-        cluster_cache[indices] = cluster
-        return cluster
-
-    def merge(cluster: Cluster, other: Cluster) -> Cluster:
-        return make_cluster(cluster.indices_bitmask | other.indices_bitmask)
-
-    # Agglomerative clustering by hand, checking the cost gain of the new
-    # cluster against the previously separate clusters
-    # Start with 1 cluster per line
-    # cluster = set of lines = new subtable
-    clusters = [make_cluster(1 << i) for i in range(len(lines))]
-
-    # Cost of 1 cluster with everything
-    # `(1 << len) - 1` gives a bitmask full of 1's of length `len`
-    cost_before_splitting = make_cluster((1 << len(lines)) - 1).cost
-    log.debug(f"  len(clusters) = {len(clusters)}")
-
-    while len(clusters) > 1:
-        lowest_cost_change = None
-        best_cluster_index = None
-        best_other_index = None
-        best_merged = None
-        for i, cluster in enumerate(clusters):
-            for j, other in enumerate(clusters[i + 1 :]):
-                merged = merge(cluster, other)
-                cost_change = merged.cost - cluster.cost - other.cost
-                if lowest_cost_change is None or cost_change < lowest_cost_change:
-                    lowest_cost_change = cost_change
-                    best_cluster_index = i
-                    best_other_index = i + 1 + j
-                    best_merged = merged
-        assert lowest_cost_change is not None
-        assert best_cluster_index is not None
-        assert best_other_index is not None
-        assert best_merged is not None
-
-        # If the best merge we found is still taking down the file size, then
-        # there's no question: we must do it, because it's beneficial in both
-        # ways (lower file size and lower number of subtables). However, if the
-        # best merge we found is not reducing file size anymore, then we need to
-        # look at the other stop criteria = the compression factor.
-        if lowest_cost_change > 0:
-            # Stop criteria: check whether we should keep merging.
-            # Compute size reduction brought by splitting
-            cost_after_splitting = sum(c.cost for c in clusters)
-            # size_reduction so that after = before * (1 - size_reduction)
-            # E.g. before = 1000, after = 800, 1 - 800/1000 = 0.2
-            size_reduction = 1 - cost_after_splitting / cost_before_splitting
-
-            # Force more merging by taking into account the compression number.
-            # Target behaviour: compression number = 1 to 9, default 5 like gzip
-            # - 1 = accept to add 1 subtable to reduce size by 50%
-            # - 5 = accept to add 5 subtables to reduce size by 50%
-            # See https://github.com/harfbuzz/packtab/blob/master/Lib/packTab/__init__.py#L690-L691
-            # Given the size reduction we have achieved so far, compute how many
-            # new subtables are acceptable.
-            max_new_subtables = -log2(1 - size_reduction) * compression
-            log.debug(
-                f"  len(clusters) = {len(clusters):3d} size_reduction={size_reduction:5.2f} max_new_subtables={max_new_subtables}",
-            )
-            if compression == 9:
-                # Override level 9 to mean: create any number of subtables
-                max_new_subtables = len(clusters)
-
-            # If we have managed to take the number of new subtables below the
-            # threshold, then we can stop.
-            if len(clusters) <= max_new_subtables + 1:
-                break
-
-        # No reason to stop yet, do the merge and move on to the next.
-        del clusters[best_other_index]
-        clusters[best_cluster_index] = best_merged
-
-    # All clusters are final; turn bitmasks back into the "Pairs" format
-    pairs_by_class1: Dict[Tuple[str, ...], Pairs] = defaultdict(dict)
-    for pair, values in pairs.items():
-        pairs_by_class1[pair[0]][pair] = values
-    pairs_groups: List[Pairs] = []
-    for cluster in clusters:
-        pairs_group: Pairs = dict()
-        for i in cluster.indices:
-            class1 = all_class1[i]
-            pairs_group.update(pairs_by_class1[class1])
-        pairs_groups.append(pairs_group)
-    return pairs_groups
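For context, the public entry point of this module is `compact(font, level)`, which splits class-kerning PairPos subtables in place using the clustering above. A minimal sketch of driving it (the font paths are hypothetical; the font must contain a GPOS table, since `compact` indexes `font["GPOS"]` directly):

```python
from fontTools.ttLib import TTFont
from fontTools.otlLib.optimize.gpos import compact

font = TTFont("MyKernedFont.ttf")   # hypothetical input; needs a GPOS table
compact(font, level=5)              # gzip-like 1..9; 9 allows any subtable count
font.save("MyKernedFont.compact.ttf")
```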
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttx.py DELETED
@@ -1,469 +0,0 @@
-"""\
-usage: ttx [options] inputfile1 [... inputfileN]
-
-TTX -- From OpenType To XML And Back
-
-If an input file is a TrueType or OpenType font file, it will be
-decompiled to a TTX file (an XML-based text format).
-If an input file is a TTX file, it will be compiled to whatever
-format the data is in, a TrueType or OpenType/CFF font file.
-A special input value of - means read from the standard input.
-
-Output files are created so they are unique: an existing file is
-never overwritten.
-
-General options
-===============
-
--h Help print this message.
---version show version and exit.
--d <outputfolder> Specify a directory where the output files are
-   to be created.
--o <outputfile> Specify a file to write the output to. A special
-   value of - would use the standard output.
--f Overwrite existing output file(s), i.e. don't append
-   numbers.
--v Verbose: more messages will be written to stdout
-   about what is being done.
--q Quiet: No messages will be written to stdout about
-   what is being done.
--a allow virtual glyph IDs on compile or decompile.
-
-Dump options
-============
-
--l List table info: instead of dumping to a TTX file, list
-   some minimal info about each table.
--t <table> Specify a table to dump. Multiple -t options
-   are allowed. When no -t option is specified, all tables
-   will be dumped.
--x <table> Specify a table to exclude from the dump. Multiple
-   -x options are allowed. -t and -x are mutually exclusive.
--s Split tables: save the TTX data into separate TTX files per
-   table and write one small TTX file that contains references
-   to the individual table dumps. This file can be used as
-   input to ttx, as long as the table files are in the
-   same directory.
--g Split glyf table: Save the glyf data into separate TTX files
-   per glyph and write a small TTX for the glyf table which
-   contains references to the individual TTGlyph elements.
-   NOTE: specifying -g implies -s (no need for -s together
-   with -g)
--i Do NOT disassemble TT instructions: when this option is
-   given, all TrueType programs (glyph programs, the font
-   program and the pre-program) will be written to the TTX
-   file as hex data instead of assembly. This saves some time
-   and makes the TTX file smaller.
--z <format> Specify a bitmap data export option for EBDT:
-   {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT:
-   {'raw', 'extfile'} Each option does one of the following:
-
-   -z raw
-      export the bitmap data as a hex dump
-   -z row
-      export each row as hex data
-   -z bitwise
-      export each row as binary in an ASCII art style
-   -z extfile
-      export the data as external files with XML references
-
-   If no export format is specified 'raw' format is used.
--e Don't ignore decompilation errors, but show a full traceback
-   and abort.
--y <number> Select font number for TrueType Collection (.ttc/.otc),
-   starting from 0.
---unicodedata <UnicodeData.txt>
-   Use custom database file to write character names in the
-   comments of the cmap TTX output.
---newline <value>
-   Control how line endings are written in the XML file. It
-   can be 'LF', 'CR', or 'CRLF'. If not specified, the
-   default platform-specific line endings are used.
-
-Compile options
-===============
-
--m Merge with TrueType-input-file: specify a TrueType or
-   OpenType font file to be merged with the TTX file. This
-   option is only valid when at most one TTX file is specified.
--b Don't recalc glyph bounding boxes: use the values in the
-   TTX file as-is.
---recalc-timestamp
-   Set font 'modified' timestamp to current time.
-   By default, the modification time of the TTX file will be
-   used.
---no-recalc-timestamp
-   Keep the original font 'modified' timestamp.
---flavor <type>
-   Specify flavor of output font file. May be 'woff' or 'woff2'.
-   Note that WOFF2 requires the Brotli Python extension,
-   available at https://github.com/google/brotli
---with-zopfli
-   Use Zopfli instead of Zlib to compress WOFF. The Python
-   extension is available at https://pypi.python.org/pypi/zopfli
-"""
-
-
-from fontTools.ttLib import TTFont, TTLibError
-from fontTools.misc.macCreatorType import getMacCreatorAndType
-from fontTools.unicode import setUnicodeData
-from fontTools.misc.textTools import Tag, tostr
-from fontTools.misc.timeTools import timestampSinceEpoch
-from fontTools.misc.loggingTools import Timer
-from fontTools.misc.cliTools import makeOutputFileName
-import os
-import sys
-import getopt
-import re
-import logging
-
-
-log = logging.getLogger("fontTools.ttx")
-
-opentypeheaderRE = re.compile("""sfntVersion=['"]OTTO["']""")
-
-
-class Options(object):
-
-    listTables = False
-    outputDir = None
-    outputFile = None
-    overWrite = False
-    verbose = False
-    quiet = False
-    splitTables = False
-    splitGlyphs = False
-    disassembleInstructions = True
-    mergeFile = None
-    recalcBBoxes = True
-    ignoreDecompileErrors = True
-    bitmapGlyphDataFormat = "raw"
-    unicodedata = None
-    newlinestr = "\n"
-    recalcTimestamp = None
-    flavor = None
-    useZopfli = False
-
-    def __init__(self, rawOptions, numFiles):
-        self.onlyTables = []
-        self.skipTables = []
-        self.fontNumber = -1
-        for option, value in rawOptions:
-            # general options
-            if option == "-h":
-                print(__doc__)
-                sys.exit(0)
-            elif option == "--version":
-                from fontTools import version
-
-                print(version)
-                sys.exit(0)
-            elif option == "-d":
-                if not os.path.isdir(value):
-                    raise getopt.GetoptError(
-                        "The -d option value must be an existing directory"
-                    )
-                self.outputDir = value
-            elif option == "-o":
-                self.outputFile = value
-            elif option == "-f":
-                self.overWrite = True
-            elif option == "-v":
-                self.verbose = True
-            elif option == "-q":
-                self.quiet = True
-            # dump options
-            elif option == "-l":
-                self.listTables = True
-            elif option == "-t":
-                # pad with space if table tag length is less than 4
-                value = value.ljust(4)
-                self.onlyTables.append(value)
-            elif option == "-x":
-                # pad with space if table tag length is less than 4
-                value = value.ljust(4)
-                self.skipTables.append(value)
-            elif option == "-s":
-                self.splitTables = True
-            elif option == "-g":
-                # -g implies (and forces) splitTables
-                self.splitGlyphs = True
-                self.splitTables = True
-            elif option == "-i":
-                self.disassembleInstructions = False
-            elif option == "-z":
-                validOptions = ("raw", "row", "bitwise", "extfile")
-                if value not in validOptions:
-                    raise getopt.GetoptError(
-                        "-z does not allow %s as a format. Use %s"
-                        % (value, validOptions)
-                    )
-                self.bitmapGlyphDataFormat = value
-            elif option == "-y":
-                self.fontNumber = int(value)
-            # compile options
-            elif option == "-m":
-                self.mergeFile = value
-            elif option == "-b":
-                self.recalcBBoxes = False
-            elif option == "-e":
-                self.ignoreDecompileErrors = False
-            elif option == "--unicodedata":
-                self.unicodedata = value
-            elif option == "--newline":
-                validOptions = ("LF", "CR", "CRLF")
-                if value == "LF":
-                    self.newlinestr = "\n"
-                elif value == "CR":
-                    self.newlinestr = "\r"
-                elif value == "CRLF":
-                    self.newlinestr = "\r\n"
-                else:
-                    raise getopt.GetoptError(
-                        "Invalid choice for --newline: %r (choose from %s)"
-                        % (value, ", ".join(map(repr, validOptions)))
-                    )
-            elif option == "--recalc-timestamp":
-                self.recalcTimestamp = True
-            elif option == "--no-recalc-timestamp":
-                self.recalcTimestamp = False
-            elif option == "--flavor":
-                self.flavor = value
-            elif option == "--with-zopfli":
-                self.useZopfli = True
-        if self.verbose and self.quiet:
-            raise getopt.GetoptError("-q and -v options are mutually exclusive")
-        if self.verbose:
-            self.logLevel = logging.DEBUG
-        elif self.quiet:
-            self.logLevel = logging.WARNING
-        else:
-            self.logLevel = logging.INFO
-        if self.mergeFile and self.flavor:
-            raise getopt.GetoptError("-m and --flavor options are mutually exclusive")
-        if self.onlyTables and self.skipTables:
-            raise getopt.GetoptError("-t and -x options are mutually exclusive")
-        if self.mergeFile and numFiles > 1:
-            raise getopt.GetoptError(
-                "Must specify exactly one TTX source file when using -m"
-            )
-        if self.flavor != "woff" and self.useZopfli:
-            raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'")
-
-
-def ttList(input, output, options):
-    ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True)
-    reader = ttf.reader
-    tags = sorted(reader.keys())
-    print('Listing table info for "%s":' % input)
-    format = "    %4s  %10s  %8s  %8s"
-    print(format % ("tag ", "  checksum", "  length", "  offset"))
-    print(format % ("----", "----------", "--------", "--------"))
-    for tag in tags:
-        entry = reader.tables[tag]
-        if ttf.flavor == "woff2":
-            # WOFF2 doesn't store table checksums, so they must be calculated
-            from fontTools.ttLib.sfnt import calcChecksum
-
-            data = entry.loadData(reader.transformBuffer)
-            checkSum = calcChecksum(data)
-        else:
-            checkSum = int(entry.checkSum)
-        if checkSum < 0:
-            checkSum = checkSum + 0x100000000
-        checksum = "0x%08X" % checkSum
-        print(format % (tag, checksum, entry.length, entry.offset))
-    print()
-    ttf.close()
-
-
-@Timer(log, "Done dumping TTX in %(time).3f seconds")
-def ttDump(input, output, options):
-    input_name = input
-    if input == "-":
-        input, input_name = sys.stdin.buffer, sys.stdin.name
-    output_name = output
-    if output == "-":
-        output, output_name = sys.stdout, sys.stdout.name
-    log.info('Dumping "%s" to "%s"...', input_name, output_name)
-    if options.unicodedata:
-        setUnicodeData(options.unicodedata)
-    ttf = TTFont(
-        input,
-        0,
-        ignoreDecompileErrors=options.ignoreDecompileErrors,
-        fontNumber=options.fontNumber,
-    )
-    ttf.saveXML(
-        output,
-        tables=options.onlyTables,
-        skipTables=options.skipTables,
-        splitTables=options.splitTables,
-        splitGlyphs=options.splitGlyphs,
-        disassembleInstructions=options.disassembleInstructions,
-        bitmapGlyphDataFormat=options.bitmapGlyphDataFormat,
-        newlinestr=options.newlinestr,
-    )
-    ttf.close()
-
-
-@Timer(log, "Done compiling TTX in %(time).3f seconds")
-def ttCompile(input, output, options):
-    input_name = input
-    if input == "-":
-        input, input_name = sys.stdin, sys.stdin.name
-    output_name = output
-    if output == "-":
-        output, output_name = sys.stdout.buffer, sys.stdout.name
-    log.info('Compiling "%s" to "%s"...' % (input_name, output_name))
-    if options.useZopfli:
-        from fontTools.ttLib import sfnt
-
-        sfnt.USE_ZOPFLI = True
-    ttf = TTFont(
-        options.mergeFile,
-        flavor=options.flavor,
-        recalcBBoxes=options.recalcBBoxes,
-        recalcTimestamp=options.recalcTimestamp,
-    )
-    ttf.importXML(input)
-
-    if options.recalcTimestamp is None and "head" in ttf and input is not sys.stdin:
-        # use TTX file modification time for head "modified" timestamp
-        mtime = os.path.getmtime(input)
-        ttf["head"].modified = timestampSinceEpoch(mtime)
-
-    ttf.save(output)
-
-
-def guessFileType(fileName):
-    if fileName == "-":
-        header = sys.stdin.buffer.peek(256)
-        ext = ""
-    else:
-        base, ext = os.path.splitext(fileName)
-        try:
-            with open(fileName, "rb") as f:
-                header = f.read(256)
-        except IOError:
-            return None
-
-    if header.startswith(b"\xef\xbb\xbf<?xml"):
-        header = header.lstrip(b"\xef\xbb\xbf")
-    cr, tp = getMacCreatorAndType(fileName)
-    if tp in ("sfnt", "FFIL"):
-        return "TTF"
-    if ext == ".dfont":
-        return "TTF"
-    head = Tag(header[:4])
-    if head == "OTTO":
-        return "OTF"
-    elif head == "ttcf":
-        return "TTC"
-    elif head in ("\0\1\0\0", "true"):
-        return "TTF"
-    elif head == "wOFF":
-        return "WOFF"
-    elif head == "wOF2":
-        return "WOFF2"
-    elif head == "<?xm":
-        # Use 'latin1' because that can't fail.
-        header = tostr(header, "latin1")
-        if opentypeheaderRE.search(header):
-            return "OTX"
-        else:
-            return "TTX"
-    return None
-
-
-def parseOptions(args):
-    rawOptions, files = getopt.getopt(
-        args,
-        "ld:o:fvqht:x:sgim:z:baey:",
-        [
-            "unicodedata=",
-            "recalc-timestamp",
-            "no-recalc-timestamp",
-            "flavor=",
-            "version",
-            "with-zopfli",
-            "newline=",
-        ],
-    )
-
-    options = Options(rawOptions, len(files))
-    jobs = []
-
-    if not files:
-        raise getopt.GetoptError("Must specify at least one input file")
-
-    for input in files:
-        if input != "-" and not os.path.isfile(input):
-            raise getopt.GetoptError('File not found: "%s"' % input)
-        tp = guessFileType(input)
-        if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"):
-            extension = ".ttx"
-            if options.listTables:
-                action = ttList
-            else:
-                action = ttDump
-        elif tp == "TTX":
-            extension = "." + options.flavor if options.flavor else ".ttf"
-            action = ttCompile
-        elif tp == "OTX":
-            extension = "." + options.flavor if options.flavor else ".otf"
-            action = ttCompile
-        else:
-            raise getopt.GetoptError('Unknown file type: "%s"' % input)
-
-        if options.outputFile:
-            output = options.outputFile
-        else:
-            if input == "-":
-                raise getopt.GetoptError("Must provide -o when reading from stdin")
-            output = makeOutputFileName(
-                input, options.outputDir, extension, options.overWrite
-            )
-            # 'touch' output file to avoid race condition in choosing file names
-            if action != ttList:
-                open(output, "a").close()
-        jobs.append((action, input, output))
-    return jobs, options
-
-
-def process(jobs, options):
-    for action, input, output in jobs:
-        action(input, output, options)
-
-
-def main(args=None):
-    """Convert OpenType fonts to XML and back"""
-    from fontTools import configLogger
-
-    if args is None:
-        args = sys.argv[1:]
-    try:
-        jobs, options = parseOptions(args)
-    except getopt.GetoptError as e:
-        print("%s\nERROR: %s" % (__doc__, e), file=sys.stderr)
-        sys.exit(2)
-
-    configLogger(level=options.logLevel)
-
-    try:
-        process(jobs, options)
-    except KeyboardInterrupt:
-        log.error("(Cancelled.)")
-        sys.exit(1)
-    except SystemExit:
-        raise
-    except TTLibError as e:
-        log.error(e)
-        sys.exit(1)
-    except:
-        log.exception("Unhandled exception has occurred")
-        sys.exit(1)
-
-
-if __name__ == "__main__":
-    sys.exit(main())
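ttx is normally invoked from the command line, but the `main(args=None)` entry point above also accepts an argv-style list, so the round trip can be scripted. A minimal sketch (the file names are hypothetical; fontTools must be installed, and note that `main()` calls `sys.exit()` on bad options or processing errors):

```python
from fontTools.ttx import main

# Decompile a font to XML, then compile the XML back to a binary font.
main(["-o", "MyFont.ttx", "MyFont.ttf"])          # font -> TTX dump
main(["-o", "MyFont.rebuilt.ttf", "MyFont.ttx"])  # TTX -> font
```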
spaces/DaleChen/AutoGPT/Dockerfile DELETED
@@ -1,38 +0,0 @@
-# Use an official Python base image from the Docker Hub
-FROM python:3.10-slim
-
-# Install git
-RUN apt-get -y update
-RUN apt-get -y install git chromium-driver
-
-# Install Xvfb and other dependencies for headless browser testing
-RUN apt-get update \
-    && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates
-
-# Install Firefox / Chromium
-RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
-    && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \
-    && apt-get update \
-    && apt-get install -y chromium firefox-esr
-
-# Set environment variables
-ENV PIP_NO_CACHE_DIR=yes \
-    PYTHONUNBUFFERED=1 \
-    PYTHONDONTWRITEBYTECODE=1
-
-# Create a non-root user and set permissions
-RUN useradd --create-home appuser
-WORKDIR /home/appuser
-RUN chown appuser:appuser /home/appuser
-USER appuser
-
-# Copy the requirements.txt file and install the requirements
-COPY --chown=appuser:appuser requirements.txt .
-RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
-    pip install --no-cache-dir --user -r requirements.txt
-
-# Copy the application files
-COPY --chown=appuser:appuser autogpt/ ./autogpt
-
-# Set the entrypoint
-ENTRYPOINT ["python", "-m", "autogpt"]
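To keep the examples in one language, here is a sketch of building and running this image from Python via `subprocess` (assumes Docker is installed, the working directory contains the Dockerfile above, and a TTY is available for the interactive run; the image tag "autogpt" is arbitrary):

```python
import subprocess

# Build the image defined by the Dockerfile above, then run it interactively.
# The container's entrypoint is "python -m autogpt", per the ENTRYPOINT line.
subprocess.run(["docker", "build", "-t", "autogpt", "."], check=True)
subprocess.run(["docker", "run", "--rm", "-it", "autogpt"], check=True)
```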