diff --git a/spaces/0x90e/ESRGAN-MANGA/ESRGAN_plus/block.py b/spaces/0x90e/ESRGAN-MANGA/ESRGAN_plus/block.py
deleted file mode 100644
index 5c9cd2fbc2334080fa4b1a85bd1a37769a396d55..0000000000000000000000000000000000000000
--- a/spaces/0x90e/ESRGAN-MANGA/ESRGAN_plus/block.py
+++ /dev/null
@@ -1,287 +0,0 @@
-from collections import OrderedDict
-import torch
-import torch.nn as nn
-
-####################
-# Basic blocks
-####################
-
-
-def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1):
-    # helper selecting activation
-    # neg_slope: for leakyrelu and init of prelu
-    # n_prelu: for p_relu num_parameters
-    act_type = act_type.lower()
-    if act_type == 'relu':
-        layer = nn.ReLU(inplace)
-    elif act_type == 'leakyrelu':
-        layer = nn.LeakyReLU(neg_slope, inplace)
-    elif act_type == 'prelu':
-        layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
-    else:
-        raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type))
-    return layer
-
-
-def norm(norm_type, nc):
-    # helper selecting normalization layer
-    norm_type = norm_type.lower()
-    if norm_type == 'batch':
-        layer = nn.BatchNorm2d(nc, affine=True)
-    elif norm_type == 'instance':
-        layer = nn.InstanceNorm2d(nc, affine=False)
-    else:
-        raise NotImplementedError('normalization layer [{:s}] is not found'.format(norm_type))
-    return layer
-
-
-def pad(pad_type, padding):
-    # helper selecting padding layer
-    # if padding is 'zero', do by conv layers
-    pad_type = pad_type.lower()
-    if padding == 0:
-        return None
-    if pad_type == 'reflect':
-        layer = nn.ReflectionPad2d(padding)
-    elif pad_type == 'replicate':
-        layer = nn.ReplicationPad2d(padding)
-    else:
-        raise NotImplementedError('padding layer [{:s}] is not implemented'.format(pad_type))
-    return layer
-
-
-def get_valid_padding(kernel_size, dilation):
-    kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
-    padding = (kernel_size - 1) // 2
-    return padding
-
-
-class ConcatBlock(nn.Module):
-    # Concat the output of a submodule to its input
-    def __init__(self, submodule):
-        super(ConcatBlock, self).__init__()
-        self.sub = submodule
-
-    def forward(self, x):
-        output = torch.cat((x, self.sub(x)), dim=1)
-        return output
-
-    def __repr__(self):
-        tmpstr = 'Identity .. \n|'
-        modstr = self.sub.__repr__().replace('\n', '\n|')
-        tmpstr = tmpstr + modstr
-        return tmpstr
-
-
-class ShortcutBlock(nn.Module):
-    #Elementwise sum the output of a submodule to its input
-    def __init__(self, submodule):
-        super(ShortcutBlock, self).__init__()
-        self.sub = submodule
-
-    def forward(self, x):
-        output = x + self.sub(x)
-        return output
-
-    def __repr__(self):
-        tmpstr = 'Identity + \n|'
-        modstr = self.sub.__repr__().replace('\n', '\n|')
-        tmpstr = tmpstr + modstr
-        return tmpstr
-
-
-def sequential(*args):
-    # Flatten Sequential. It unwraps nn.Sequential.
-    if len(args) == 1:
-        if isinstance(args[0], OrderedDict):
-            raise NotImplementedError('sequential does not support OrderedDict input.')
-        return args[0]  # No sequential is needed.
-    modules = []
-    for module in args:
-        if isinstance(module, nn.Sequential):
-            for submodule in module.children():
-                modules.append(submodule)
-        elif isinstance(module, nn.Module):
-            modules.append(module)
-    return nn.Sequential(*modules)
-
-
-def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True, \
-               pad_type='zero', norm_type=None, act_type='relu', mode='CNA'):
-    '''
-    Conv layer with padding, normalization, activation
-    mode: CNA --> Conv -> Norm -> Act
-        NAC --> Norm -> Act --> Conv (Identity Mappings in Deep Residual Networks, ECCV16)
-    '''
-    assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode)
-    padding = get_valid_padding(kernel_size, dilation)
-    p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
-    padding = padding if pad_type == 'zero' else 0
-
-    c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, \
-            dilation=dilation, bias=bias, groups=groups)
-    a = act(act_type) if act_type else None
-    if 'CNA' in mode:
-        n = norm(norm_type, out_nc) if norm_type else None
-        return sequential(p, c, n, a)
-    elif mode == 'NAC':
-        if norm_type is None and act_type is not None:
-            a = act(act_type, inplace=False)
-            # Important!
-            # input----ReLU(inplace)----Conv--+----output
-            #        |________________________|
-            # inplace ReLU will modify the input, therefore wrong output
-        n = norm(norm_type, in_nc) if norm_type else None
-        return sequential(n, a, p, c)
-
-
-def conv1x1(in_planes, out_planes, stride=1):
-    """1x1 convolution"""
-    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
-
-
-class GaussianNoise(nn.Module):
-    def __init__(self, sigma=0.1, is_relative_detach=False):
-        super().__init__()
-        self.sigma = sigma
-        self.is_relative_detach = is_relative_detach
-        self.noise = torch.tensor(0, dtype=torch.float).to(torch.device('cuda'))
-
-    def forward(self, x):
-        if self.training and self.sigma != 0:
-            scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x
-            sampled_noise = self.noise.repeat(*x.size()).normal_() * scale
-            x = x + sampled_noise
-        return x
-
-
-####################
-# Useful blocks
-####################
-
-
-class ResNetBlock(nn.Module):
-    '''
-    ResNet Block, 3-3 style
-    with extra residual scaling used in EDSR
-    (Enhanced Deep Residual Networks for Single Image Super-Resolution, CVPRW 17)
-    '''
-
-    def __init__(self, in_nc, mid_nc, out_nc, kernel_size=3, stride=1, dilation=1, groups=1, \
-            bias=True, pad_type='zero', norm_type=None, act_type='relu', mode='CNA', res_scale=1):
-        super(ResNetBlock, self).__init__()
-        conv0 = conv_block(in_nc, mid_nc, kernel_size, stride, dilation, groups, bias, pad_type, \
-            norm_type, act_type, mode)
-        if mode == 'CNA':
-            act_type = None
-        if mode == 'CNAC':  # Residual path: |-CNAC-|
-            act_type = None
-            norm_type = None
-        conv1 = conv_block(mid_nc, out_nc, kernel_size, stride, dilation, groups, bias, pad_type, \
-            norm_type, act_type, mode)
-        # if in_nc != out_nc:
-        #     self.project = conv_block(in_nc, out_nc, 1, stride, dilation, 1, bias, pad_type, \
-        #         None, None)
-        #     print('Need a projecter in ResNetBlock.')
-        # else:
-        #     self.project = lambda x:x
-        self.res = sequential(conv0, conv1)
-        self.res_scale = res_scale
-
-    def forward(self, x):
-        res = self.res(x).mul(self.res_scale)
-        return x + res
-
-
-class ResidualDenseBlock_5C(nn.Module):
-    '''
-    Residual Dense Block
-    style: 5 convs
-    The core module of paper: (Residual Dense Network for Image Super-Resolution, CVPR 18)
-    '''
-
-    def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', \
-            norm_type=None, act_type='leakyrelu', mode='CNA', noise_input=True):
-        super(ResidualDenseBlock_5C, self).__init__()
-        # gc: growth channel, i.e. intermediate channels
-        self.noise = GaussianNoise() if noise_input else None
-        self.conv1x1 = conv1x1(nc, gc)
-        self.conv1 = conv_block(nc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, \
-            norm_type=norm_type, act_type=act_type, mode=mode)
-        self.conv2 = conv_block(nc+gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, \
-            norm_type=norm_type, act_type=act_type, mode=mode)
-        self.conv3 = conv_block(nc+2*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, \
-            norm_type=norm_type, act_type=act_type, mode=mode)
-        self.conv4 = conv_block(nc+3*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, \
-            norm_type=norm_type, act_type=act_type, mode=mode)
-        if mode == 'CNA':
-            last_act = None
-        else:
-            last_act = act_type
-        self.conv5 = conv_block(nc+4*gc, nc, 3, stride, bias=bias, pad_type=pad_type, \
-            norm_type=norm_type, act_type=last_act, mode=mode)
-
-    def forward(self, x):
-        x1 = self.conv1(x)
-        x2 = self.conv2(torch.cat((x, x1), 1))
-        x2 = x2 + self.conv1x1(x)
-        x3 = self.conv3(torch.cat((x, x1, x2), 1))
-        x4 = self.conv4(torch.cat((x, x1, x2, x3), 1))
-        x4 = x4 + x2
-        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
-        return self.noise(x5.mul(0.2) + x)
-
-
-class RRDB(nn.Module):
-    '''
-    Residual in Residual Dense Block
-    (ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks)
-    '''
-
-    def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', \
-            norm_type=None, act_type='leakyrelu', mode='CNA'):
-        super(RRDB, self).__init__()
-        self.RDB1 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type, \
-            norm_type, act_type, mode)
-        self.RDB2 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type, \
-            norm_type, act_type, mode)
-        self.RDB3 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type, \
-            norm_type, act_type, mode)
-        self.noise = GaussianNoise()
-
-    def forward(self, x):
-        out = self.RDB1(x)
-        out = self.RDB2(out)
-        out = self.RDB3(out)
-        return self.noise(out.mul(0.2) + x)
-
-
-####################
-# Upsampler
-####################
-
-
-def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, \
-                        pad_type='zero', norm_type=None, act_type='relu'):
-    '''
-    Pixel shuffle layer
-    (Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional
-    Neural Network, CVPR17)
-    '''
-    conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias, \
-                        pad_type=pad_type, norm_type=None, act_type=None)
-    pixel_shuffle = nn.PixelShuffle(upscale_factor)
-
-    n = norm(norm_type, out_nc) if norm_type else None
-    a = act(act_type) if act_type else None
-    return sequential(conv, pixel_shuffle, n, a)
-
-
-def upconv_blcok(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, \
-                pad_type='zero', norm_type=None, act_type='relu', mode='nearest'):
-    # Up conv
-    # described in https://distill.pub/2016/deconv-checkerboard/
-    upsample = nn.Upsample(scale_factor=upscale_factor, mode=mode)
-    conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias, \
-                        pad_type=pad_type, norm_type=norm_type, act_type=act_type)
-    return sequential(upsample, conv)
diff --git a/spaces/1368565466ki/ZSTRD/modules.py b/spaces/1368565466ki/ZSTRD/modules.py
deleted file mode 100644
index 56ea4145eddf19dd330a3a41ab0183efc1686d83..0000000000000000000000000000000000000000
--- a/spaces/1368565466ki/ZSTRD/modules.py
+++ /dev/null
@@ -1,388 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
-  def __init__(self, channels, eps=1e-5):
-    super().__init__()
-    self.channels = channels
-    self.eps = eps
-
-    self.gamma = nn.Parameter(torch.ones(channels))
-    self.beta = nn.Parameter(torch.zeros(channels))
-
-  def forward(self, x):
-    x = x.transpose(1, -1)
-    x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-    return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
-  def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-    super().__init__()
-    self.in_channels = in_channels
-    self.hidden_channels = hidden_channels
-    self.out_channels = out_channels
-    self.kernel_size = kernel_size
-    self.n_layers = n_layers
-    self.p_dropout = p_dropout
-    assert n_layers > 1, "Number of layers should be larger than 0."
-
-    self.conv_layers = nn.ModuleList()
-    self.norm_layers = nn.ModuleList()
-    self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-    self.norm_layers.append(LayerNorm(hidden_channels))
-    self.relu_drop = nn.Sequential(
-        nn.ReLU(),
-        nn.Dropout(p_dropout))
-    for _ in range(n_layers-1):
-      self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-      self.norm_layers.append(LayerNorm(hidden_channels))
-    self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-    self.proj.weight.data.zero_()
-    self.proj.bias.data.zero_()
-
-  def forward(self, x, x_mask):
-    x_org = x
-    for i in range(self.n_layers):
-      x = self.conv_layers[i](x * x_mask)
-      x = self.norm_layers[i](x)
-      x = self.relu_drop(x)
-    x = x_org + self.proj(x)
-    return x * x_mask
-
-
-class DDSConv(nn.Module):
-  """
-  Dialted and Depth-Separable Convolution
-  """
-  def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-    super().__init__()
-    self.channels = channels
-    self.kernel_size = kernel_size
-    self.n_layers = n_layers
-    self.p_dropout = p_dropout
-
-    self.drop = nn.Dropout(p_dropout)
-    self.convs_sep = nn.ModuleList()
-    self.convs_1x1 = nn.ModuleList()
-    self.norms_1 = nn.ModuleList()
-    self.norms_2 = nn.ModuleList()
-    for i in range(n_layers):
-      dilation = kernel_size ** i
-      padding = (kernel_size * dilation - dilation) // 2
-      self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
-          groups=channels, dilation=dilation, padding=padding
-      ))
-      self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-      self.norms_1.append(LayerNorm(channels))
-      self.norms_2.append(LayerNorm(channels))
-
-  def forward(self, x, x_mask, g=None):
-    if g is not None:
-      x = x + g
-    for i in range(self.n_layers):
-      y = self.convs_sep[i](x * x_mask)
-      y = self.norms_1[i](y)
-      y = F.gelu(y)
-      y = self.convs_1x1[i](y)
-      y = self.norms_2[i](y)
-      y = F.gelu(y)
-      y = self.drop(y)
-      x = x + y
-    return x * x_mask
-
-
-class WN(torch.nn.Module):
-  def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
-    super(WN, self).__init__()
-    assert(kernel_size % 2 == 1)
-    self.hidden_channels =hidden_channels
-    self.kernel_size = kernel_size,
-    self.dilation_rate = dilation_rate
-    self.n_layers = n_layers
-    self.gin_channels = gin_channels
-    self.p_dropout = p_dropout
-
-    self.in_layers = torch.nn.ModuleList()
-    self.res_skip_layers = torch.nn.ModuleList()
-    self.drop = nn.Dropout(p_dropout)
-
-    if gin_channels != 0:
-      cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
-      self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
-    for i in range(n_layers):
-      dilation = dilation_rate ** i
-      padding = int((kernel_size * dilation - dilation) / 2)
-      in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
-                                 dilation=dilation, padding=padding)
-      in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
-      self.in_layers.append(in_layer)
-
-      # last one is not necessary
-      if i < n_layers - 1:
-        res_skip_channels = 2 * hidden_channels
-      else:
-        res_skip_channels = hidden_channels
-
-      res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
-      res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
-      self.res_skip_layers.append(res_skip_layer)
-
-  def forward(self, x, x_mask, g=None, **kwargs):
-    output = torch.zeros_like(x)
-    n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
-    if g is not None:
-      g = self.cond_layer(g)
-
-    for i in range(self.n_layers):
-      x_in = self.in_layers[i](x)
-      if g is not None:
-        cond_offset = i * 2 * self.hidden_channels
-        g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
-      else:
-        g_l = torch.zeros_like(x_in)
-
-      acts = commons.fused_add_tanh_sigmoid_multiply(
-          x_in,
-          g_l,
-          n_channels_tensor)
-      acts = self.drop(acts)
-
-      res_skip_acts = self.res_skip_layers[i](acts)
-      if i < self.n_layers - 1:
-        res_acts = res_skip_acts[:,:self.hidden_channels,:]
-        x = (x + res_acts) * x_mask
-        output = output + res_skip_acts[:,self.hidden_channels:,:]
-      else:
-        output = output + res_skip_acts
-    return output * x_mask
-
-  def remove_weight_norm(self):
-    if self.gin_channels != 0:
-      torch.nn.utils.remove_weight_norm(self.cond_layer)
-    for l in self.in_layers:
-      torch.nn.utils.remove_weight_norm(l)
-    for l in self.res_skip_layers:
-      torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
-  def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-    super(ResBlock1, self).__init__()
-    self.convs1 = nn.ModuleList([
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                           padding=get_padding(kernel_size, dilation[0]))),
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                           padding=get_padding(kernel_size, dilation[1]))),
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
-                           padding=get_padding(kernel_size, dilation[2])))
-    ])
-    self.convs1.apply(init_weights)
-
-    self.convs2 = nn.ModuleList([
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                           padding=get_padding(kernel_size, 1))),
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                           padding=get_padding(kernel_size, 1))),
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                           padding=get_padding(kernel_size, 1)))
-    ])
-    self.convs2.apply(init_weights)
-
-  def forward(self, x, x_mask=None):
-    for c1, c2 in zip(self.convs1, self.convs2):
-      xt = F.leaky_relu(x, LRELU_SLOPE)
-      if x_mask is not None:
-        xt = xt * x_mask
-      xt = c1(xt)
-      xt = F.leaky_relu(xt, LRELU_SLOPE)
-      if x_mask is not None:
-        xt = xt * x_mask
-      xt = c2(xt)
-      x = xt + x
-    if x_mask is not None:
-      x = x * x_mask
-    return x
-
-  def remove_weight_norm(self):
-    for l in self.convs1:
-      remove_weight_norm(l)
-    for l in self.convs2:
-      remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
-  def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-    super(ResBlock2, self).__init__()
-    self.convs = nn.ModuleList([
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                           padding=get_padding(kernel_size, dilation[0]))),
-        weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                           padding=get_padding(kernel_size, dilation[1])))
-    ])
-    self.convs.apply(init_weights)
-
-  def forward(self, x, x_mask=None):
-    for c in self.convs:
-      xt = F.leaky_relu(x, LRELU_SLOPE)
-      if x_mask is not None:
-        xt = xt * x_mask
-      xt = c(xt)
-      x = xt + x
-    if x_mask is not None:
-      x = x * x_mask
-    return x
-
-  def remove_weight_norm(self):
-    for l in self.convs:
-      remove_weight_norm(l)
-
-
-class Log(nn.Module):
-  def forward(self, x, x_mask, reverse=False, **kwargs):
-    if not reverse:
-      y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
-      logdet = torch.sum(-y, [1, 2])
-      return y, logdet
-    else:
-      x = torch.exp(x) * x_mask
-      return x
-
-
-class Flip(nn.Module):
-  def forward(self, x, *args, reverse=False, **kwargs):
-    x = torch.flip(x, [1])
-    if not reverse:
-      logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
-      return x, logdet
-    else:
-      return x
-
-
-class ElementwiseAffine(nn.Module):
-  def __init__(self, channels):
-    super().__init__()
-    self.channels = channels
-    self.m = nn.Parameter(torch.zeros(channels,1))
-    self.logs = nn.Parameter(torch.zeros(channels,1))
-
-  def forward(self, x, x_mask, reverse=False, **kwargs):
-    if not reverse:
-      y = self.m + torch.exp(self.logs) * x
-      y = y * x_mask
-      logdet = torch.sum(self.logs * x_mask, [1,2])
-      return y, logdet
-    else:
-      x = (x - self.m) * torch.exp(-self.logs) * x_mask
-      return x
-
-
-class ResidualCouplingLayer(nn.Module):
-  def __init__(self,
-      channels,
-      hidden_channels,
-      kernel_size,
-      dilation_rate,
-      n_layers,
-      p_dropout=0,
-      gin_channels=0,
-      mean_only=False):
-    assert channels % 2 == 0, "channels should be divisible by 2"
-    super().__init__()
-    self.channels = channels
-    self.hidden_channels = hidden_channels
-    self.kernel_size = kernel_size
-    self.dilation_rate = dilation_rate
-    self.n_layers = n_layers
-    self.half_channels = channels // 2
-    self.mean_only = mean_only
-
-    self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-    self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
-    self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-    self.post.weight.data.zero_()
-    self.post.bias.data.zero_()
-
-  def forward(self, x, x_mask, g=None, reverse=False):
-    x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-    h = self.pre(x0) * x_mask
-    h = self.enc(h, x_mask, g=g)
-    stats = self.post(h) * x_mask
-    if not self.mean_only:
-      m, logs = torch.split(stats, [self.half_channels]*2, 1)
-    else:
-      m = stats
-      logs = torch.zeros_like(m)
-
-    if not reverse:
-      x1 = m + x1 * torch.exp(logs) * x_mask
-      x = torch.cat([x0, x1], 1)
-      logdet = torch.sum(logs, [1,2])
-      return x, logdet
-    else:
-      x1 = (x1 - m) * torch.exp(-logs) * x_mask
-      x = torch.cat([x0, x1], 1)
-      return x
-
-
-class ConvFlow(nn.Module):
-  def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
-    super().__init__()
-    self.in_channels = in_channels
-    self.filter_channels = filter_channels
-    self.kernel_size = kernel_size
-    self.n_layers = n_layers
-    self.num_bins = num_bins
-    self.tail_bound = tail_bound
-    self.half_channels = in_channels // 2
-
-    self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
-    self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
-    self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
-    self.proj.weight.data.zero_()
-    self.proj.bias.data.zero_()
-
-  def forward(self, x, x_mask, g=None, reverse=False):
-    x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-    h = self.pre(x0)
-    h = self.convs(h, x_mask, g=g)
-    h = self.proj(h) * x_mask
-
-    b, c, t = x0.shape
-    h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
-    unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
-    unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
-    unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
-    x1, logabsdet = piecewise_rational_quadratic_transform(x1,
-        unnormalized_widths,
-        unnormalized_heights,
-        unnormalized_derivatives,
-        inverse=reverse,
-        tails='linear',
-        tail_bound=self.tail_bound
-    )
-
-    x = torch.cat([x0, x1], 1) * x_mask
-    logdet = torch.sum(logabsdet * x_mask, [1,2])
-    if not reverse:
-      return x, logdet
-    else:
-      return x
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autocad 2012 Crack Kickass Torre The Best Way to Enjoy Autocad without Paying a Dime.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autocad 2012 Crack Kickass Torre The Best Way to Enjoy Autocad without Paying a Dime.md
deleted file mode 100644
index 9ac136cc1517bcdca90db110d4510abc5d325672..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autocad 2012 Crack Kickass Torre The Best Way to Enjoy Autocad without Paying a Dime.md
+++ /dev/null
@@ -1,108 +0,0 @@
-<br />
-

Autocad 2012 Crack Kickass Torre: What You Need to Know

-

If you are looking for a way to get Autocad 2012, a popular software for designing and drafting, for free, you might have come across the term "Autocad 2012 Crack Kickass Torre". But what does it mean and how can you use it? In this article, we will explain everything you need to know about Autocad 2012 Crack Kickass Torre, including what it is, how it works, and what are the risks and benefits of using it. We will also provide you with a step-by-step guide on how to download and install Autocad 2012 Crack from Kickass Torre, as well as some tips and tricks for using it successfully.

-

What is Autocad 2012?

-

Autocad 2012 is a software developed by Autodesk that allows you to create and edit 2D and 3D designs. It is widely used by architects, engineers, designers, and other professionals who need to create accurate and detailed drawings. Autocad 2012 has many features and benefits that make it a powerful and versatile tool for design and drafting.

-

Autocad 2012 Crack Kickass Torre


DOWNLOAD ––– https://byltly.com/2uKzpM



-

Features and Benefits of Autocad 2012

-

Some of the features and benefits of Autocad 2012 are:

- -

System Requirements and Compatibility of Autocad 2012

-

To run Autocad 2012 smoothly on your computer, you need to meet the following system requirements:

-

Autocad 2012 full version with crack torrent download
-How to install Autocad 2012 cracked by kickass
-Autocad 2012 keygen free download kickass
-Autocad 2012 activation code generator torrent
-Autocad 2012 64 bit crack kickass torrent
-Autocad 2012 serial number and product key torrent
-Autocad 2012 patch file download kickass
-Autocad 2012 license key crack torrent
-Autocad 2012 crack only download kickass
-Autocad 2012 xforce keygen torrent download
-Autocad 2012 crack for mac kickass torrent
-Autocad 2012 portable edition torrent kickass
-Autocad 2012 crack kickass torre alternative sites
-Autocad 2012 crack kickass torre proxy list
-Autocad 2012 crack kickass torre magnet link
-Autocad 2012 crack kickass torre VPN service
-Autocad 2012 crack kickass torre safe or not
-Autocad 2012 crack kickass torre legal or illegal
-Autocad 2012 crack kickass torre reviews and ratings
-Autocad 2012 crack kickass torre problems and solutions
-Autocad 2012 crack kickass torre tips and tricks
-Autocad 2012 crack kickass torre best practices and guidelines
-Autocad 2012 crack kickass torre advantages and disadvantages
-Autocad 2012 crack kickass torre features and benefits
-Autocad 2012 crack kickass torre comparison and contrast
-Autocad 2012 crack kickass torre pros and cons
-Autocad 2012 crack kickass torre FAQs and answers
-Autocad 2012 crack kickass torre tutorials and guides
-Autocad 2012 crack kickass torre videos and demos
-Autocad 2012 crack kickass torre blogs and forums
-Autocad 2012 crack kickass torre news and updates
-Autocad 2012 crack kickass torre case studies and testimonials
-Autocad 2012 crack kickass torre success stories and examples
-Autocad 2012 crack kickass torre statistics and facts
-Autocad 2012 crack kickass torre research and analysis
-Autocad 2012 crack kickass torre tools and resources
-Autocad 2012 crack kickass torre software and apps
-Autocad 2012 crack kickass torre courses and training
-Autocad 2012 crack kickass torre ebooks and reports
-Autocad 2012 crack kickass torre infographics and images
-Autocad 2012 crack kickass torre podcasts and audio
-Autocad 2012 crack kickass torre webinars and events
-Autocad 2012 crack kickass torre offers and discounts
-Autocad 2012 crack kickass torre coupons and codes
-Autocad 2012 crack kickass torre free trial and download
-Autocad 2012 crack kickass torre bonus and giveaway
-Autocad 2012 crack kickass torre affiliate program and commission
-Autocad 2012 crack kickass torre customer service and support
-Autocad 2012 crack kickass torre feedback and suggestions

- -

To use Autocad 2012 on your computer, you also need to have a valid license key that activates the software. A license key is a unique code that proves that you have purchased the software legally from Autodesk or an authorized reseller. A license key can be obtained by paying a subscription fee or buying a perpetual license. However, some people may not be able or willing to pay for the software, so they look for alternative ways to get it for free. One of these ways is using a crack.

-

What is a Crack?

-

A crack is a program or file that modifies or bypasses the original software's security features. A crack can be used to remove the license verification process or generate fake license keys that trick the software into thinking that it is activated. A crack can also be used to unlock additional features or functions that are not available in the original software. A crack can be applied to various types of software, such as games, applications, operating systems, etc.

-

How Cracks Work and Why People Use Them

-

A crack works by altering the code or data of the original software in some way. For example, a crack may change some values in the registry or memory that control the license verification process. A crack may also replace some files or folders in the installation directory that contain the security features. A crack may also inject some code into the running process of the software that disables the security features. A crack may also create some fake files or folders that mimic the original ones but contain different information.

-

People use cracks for various reasons. Some of the common reasons are:

- -

Risks and Drawbacks of Using Cracks

-

While using cracks may seem tempting or convenient for some people, there are also many risks and drawbacks associated with them. Some of the common risks and drawbacks are:

- -

Therefore, using cracks is not recommended as it may cause more harm than good. If you want to use Autocad 2012 legally and safely, you should buy it from Autodesk or an authorized reseller. However, if you still insist on using a crack for Autocad 2012, you should be aware of where to get it from and how to use it properly. One of the sources where people get cracks from is Kickass Torre.

-

What is Kickass Torre?

-

Kickass Torre is a website that hosts torrent files for various types of content, such as movies, music, games, software, etc. A torrent file is a small file that contains information about the content and how to download it from other users who have the content on their computers. A torrent file does not contain the content itself, but rather the metadata that helps to locate and download it. To download a torrent file, you need a torrent client, such as BitTorrent, uTorrent, or qBittorrent, that can read the torrent file and connect to other users who have the content on their computers. The process of downloading a torrent file is called seeding, and the process of uploading a torrent file is called leeching. The more seeders and le

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Elysian cracked roblox download 13 Full lua loadstrings scripts and more.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Elysian cracked roblox download 13 Full lua loadstrings scripts and more.md deleted file mode 100644 index 500c6a317d9cbb74efc48e415d4dd322597588ab..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Elysian cracked roblox download 13 Full lua loadstrings scripts and more.md +++ /dev/null @@ -1,92 +0,0 @@ - -

Elysian Cracked Roblox Download 13: Everything You Need to Know

-

Roblox is one of the most popular online gaming platforms in the world, with millions of players creating and exploring various games every day. However, some players may want to enhance their gaming experience by using exploits, which are tools that can modify the game's code and give them an advantage over other players. One of the most powerful and sought-after exploits for Roblox is Elysian, which can execute any script and bypass any anti-cheat system. In this article, we will tell you everything you need to know about Elysian cracked Roblox download 13, including what it is, how to get it, why use it, how to use it, and some tips and tricks for using it safely and effectively.

-

elysian cracked roblox download 13


DOWNLOADhttps://byltly.com/2uKv5b



-

What is Elysian?

-

Elysian is a premium exploit for Roblox that can run any script with full Lua support, loadstrings, and more. It can also bypass any anti-exploit system that Roblox has, such as Filtering Enabled, Remote Spy, Script Dumper, etc. This means that you can use Elysian to execute any script that you want without getting detected or banned by Roblox. Some of the scripts that you can use with Elysian are aimbot, fly, noclip, speed, teleport, kill all, admin commands, and more.

-

Features of Elysian

-

Some of the features that make Elysian one of the best exploits for Roblox are:

- -

How to get Elysian cracked version

-

Elysian is a premium exploit that costs $20 to buy from its official website. However, some people may not be able or willing to pay for it. In that case, they may look for a cracked version of Elysian that they can download for free from other sources. A cracked version is a modified version of the original exploit that bypasses its security features and allows anyone to use it without paying.

-

However, getting a cracked version of Elysian is not easy or safe. Most of the websites or videos that claim to offer a free download link for Elysian cracked are fake or malicious. They may contain viruses, malware, spyware, ransomware, or other harmful programs that can infect your computer or steal your personal information. They may also ask you to complete surveys, download apps, enter your email or password, or perform other tasks that are scams or phishing attempts.

-

elysian v2 full lua roblox exploit
-roblox elysian cracked dll injector
-elysian v3.7 roblox hack free download
-how to use elysian scripts on roblox
-roblox elysian exploit 2017 working
-elysian cracked roblox download mega
-roblox elysian v2 loadstrings scripts
-elysian roblox exploit youtube video
-roblox elysian hack 2017 sendspace
-elysian v3.7 roblox rc7 cracked
-roblox elysian exploit 2013 download
-elysian v2 roblox hack credit aero
-roblox elysian cracked easy fun
-elysian v3.7 roblox exploit 2017
-roblox elysian hack dll injector bitly
-elysian v2 full lua loadstrings more
-roblox elysian exploit yay download desc
-elysian cracked roblox download virus
-roblox elysian v2 scripts more astherix yt
-elysian v3.7 roblox hack working 2017
-roblox elysian cracked download injector
-elysian v2 roblox exploit youtube astherix
-roblox elysian hack 2013 dll bitly
-elysian cracked roblox download mega nz
-roblox elysian v2 loadstrings w scripts more
-elysian roblox exploit youtube nobody 7
-roblox elysian hack 2017 sendspace file
-elysian v3.7 roblox rc7 download working
-roblox elysian exploit 2013 yay desc nobody
-elysian cracked roblox download not scared
-roblox elysian v2 scripts more youtube video
-elysian v3.7 roblox hack free sendspace com
-roblox elysian cracked easy fun nobody 7
-elysian v3.7 roblox exploit working 2017 youtube
-roblox elysian hack dll injector http bitly
-elysian v2 full lua more youtube video
-roblox elysian exploit yay desc youtube nobody
-elysian cracked roblox download virus scared
-roblox elysian v2 loadstrings w scripts astherix
-elysian v3.7 roblox rc7 free download sendspace
-roblox elysian exploit 2013 download bitly
-elysian v2 full lua credit aero youtube astherix
-roblox elysian cracked easy fun youtube video
-elysian v3.7 roblox exploit 2017 youtube video
-roblox elysian hack dll injector bitly virus
-elysian v2 full lua loadstrings more astherix yt
-roblox elysian exploit yay download desc bitly
-elysian cracked roblox download virus not scared
-roblox elysian v2 loadstrings scripts more video

-

Therefore, we do not recommend downloading or using a cracked version of Elysian. It is illegal, unethical, risky, and unreliable. If you want to use Elysian for Roblox, you should buy it from its official website and support its developers.

-

Why use Elysian for Roblox?

-

Elysian is one of the best exploits for Roblox because it can give you many advantages and benefits over other players. Some of the reasons why you may want to use Elysian for Roblox are:

-

Benefits of using Elysian

- -

Risks of using Elysian

- -

How to use Elysian for Roblox?

-

If you have bought Elysian from its official website and want to use it for Roblox, here are the steps that you need to follow:

-

Step 1: Download and install Elysian

-

After purchasing Elysian from its website , you will receive an email with a download link for the exploit. Click on the link and download the zip file containing the exploit files. Extract the zip file to a folder on your computer and run the setup.exe file as administrator. Follow the instructions on the screen to install Elysian on your computer.

-

Step 2: Run Elysian and select a script

-

After installing Elysian on your computer

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2013 Free Download A Step-by-Step Guide for Windows 10 Users.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2013 Free Download A Step-by-Step Guide for Windows 10 Users.md deleted file mode 100644 index 433650f65897f324e58d4a38c6d4a4e06e9f65f8..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2013 Free Download A Step-by-Step Guide for Windows 10 Users.md +++ /dev/null @@ -1,38 +0,0 @@ - -

How to Get Excel 2013 Free Download for Windows 10

-

Excel 2013 is one of the most popular spreadsheet applications in the world. It allows you to create, edit, and analyze data in various formats, such as tables, charts, graphs, and pivot tables. Excel 2013 also has many advanced features, such as formulas, functions, macros, and data analysis tools. If you want to use Excel 2013 on your Windows 10 PC, you might be wondering how to get it for free. In this article, we will show you some ways to do that.

-

excel 2013 free download


Download Filehttps://byltly.com/2uKvl3



-

Method 1: Use the Office 365 Trial

-

The easiest way to get Excel 2013 for free is to use the Office 365 trial. Office 365 is a subscription service that gives you access to the latest versions of Microsoft Office applications, including Excel. You can sign up for a free trial of Office 365 for one month and use Excel 2013 on your Windows 10 PC. Here are the steps to use the Office 365 trial:

-
    -
  1. Go to https://www.microsoft.com/en-us/microsoft-365/try and click on the Try 1 month free button.
  2. -
  3. Sign in with your Microsoft account or create a new one.
  4. -
  5. Enter your payment details and confirm your order. You will not be charged until the end of the trial period.
  6. -
  7. Download and install Office 365 on your Windows 10 PC.
  8. -
  9. Launch Excel 2013 and enjoy using it for free for one month.
  10. -
-

Method 2: Use the Excel Online App

-

Another way to get Excel 2013 for free is to use the Excel Online app. This is a web-based version of Excel that lets you create and edit spreadsheets online. You can access it from any browser on your Windows 10 PC. You can also save your files to OneDrive or download them to your PC. Here are the steps to use the Excel Online app:

-
    -
  1. Go to https://office.live.com/start/Excel.aspx and sign in with your Microsoft account or create a new one.
  2. -
  3. Click on the New blank workbook button or choose from the templates available.
  4. -
  5. Create and edit your spreadsheet online using the familiar Excel interface and features.
  6. -
  7. Save your file to OneDrive or download it to your PC.
  8. -
-

Method 3: Use a Third-Party Software

-

A third way to get Excel 2013 for free is to use a third-party software that can open and edit Excel files. There are many free alternatives to Excel that offer similar functionality and compatibility. Some of them are:

-

- -

To use a third-party software, follow these steps:

-
    -
  1. Download and install the software of your choice on your Windows 10 PC.
  2. -
  3. Launch the software and open an Excel file or create a new one.
  4. -
  5. Edit your spreadsheet using the software's features and interface.
  6. -
  7. Save your file in Excel format or another format of your choice.
  8. -

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Far Cry 3 Blood Dragon Crack Only Enjoy the Kick- Action with No-DVD [FTS] or [Reloaded].md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Far Cry 3 Blood Dragon Crack Only Enjoy the Kick- Action with No-DVD [FTS] or [Reloaded].md deleted file mode 100644 index 183253d1c91f71a9c1627227543b7ca5685f5046..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Far Cry 3 Blood Dragon Crack Only Enjoy the Kick- Action with No-DVD [FTS] or [Reloaded].md +++ /dev/null @@ -1,118 +0,0 @@ - -

Far Cry 3 Blood Dragon Crack Only: How to Play the Game for Free

-

Introduction

-

Do you love shooting games with a twist of humor and nostalgia? Do you want to experience a cyberpunk adventure in a neon-lit world full of mutants, cyborgs, and blood dragons? Do you want to do all that without spending a dime? If you answered yes to any of these questions, then you might be interested in Far Cry 3 Blood Dragon Crack Only.

-

What is Far Cry 3 Blood Dragon?

-

Far Cry 3 Blood Dragon is a standalone expansion of the popular first-person shooter game Far Cry 3. It was released in 2013 by Ubisoft as a homage to the 1980s sci-fi and action movies. The game is set in an alternate version of 2007, where you play as Sergeant Rex "Power" Colt, a cybernetic commando who has to stop a rogue colonel from unleashing a nuclear war.

-

Far Cry 3 Blood Dragon Crack Only


DOWNLOADhttps://byltly.com/2uKz7f



-

The game features a retro-futuristic aesthetic, with neon colors, pixelated graphics, synth music, and cheesy dialogue. The game also parodies many tropes and cliches of the genre, such as hacking minigames, training montages, one-liners, and boss fights. The game is full of references and easter eggs to popular movies, games, and shows of the era, such as Terminator, RoboCop, Aliens, Rambo, Predator, Tron, Blade Runner, Star Wars, and more.

-

What is a crack and why do you need it?

-

A crack is a modified version of a game's executable file or other files that bypasses the copy protection or digital rights management (DRM) system of the game. A crack allows you to play a game without having to purchase it or activate it online. A crack can also enable features that are otherwise locked or restricted by the game's developer or publisher.

-

You might need a crack for various reasons. Maybe you want to try out a game before buying it. Maybe you don't have enough money to buy it. Maybe you don't have an internet connection or don't want to deal with online activation. Maybe you want to mod the game or play it with custom settings. Whatever your reason is, a crack can help you enjoy a game for free.

-

How to download and install Far Cry 3 Blood Dragon Crack Only

-

Step 1: Download Far Cry 3 Blood Dragon from a trusted source

-

The first thing you need to do is to download the full version of Far Cry 3 Blood Dragon from a trusted source. You can either buy it from an official store like Steam or Uplay, or download it from a torrent site like The Pirate Bay or Kickass Torrents. Make sure you download the latest version of the game (v1.02) and check the comments and ratings of the uploader before downloading.

-

Step 2: Install Far Cry 3 Blood Dragon on your PC

-

The next thing you need to do is to install Far Cry 3 Blood Dragon on your PC. You can either use an installer or extract the files manually. Follow the instructions on the screen or in the readme file. Choose a destination folder for the game and wait for the installation to finish.

-

Step 3: Download Far Cry 3 Blood Dragon Crack Only from a reliable site

-

The third thing you need to do is to download Far Cry 3 Blood Dragon Crack Only from a reliable site. You can find many sites that offer cracks for games, but not all of them are safe or working. Some of them might contain malware, viruses, adware, spyware, or other unwanted programs that can harm your PC or steal your data. Some of them might not work properly or cause errors or crashes in the game.

-

To avoid these problems, you should download Far Cry 3 Blood Dragon Crack Only from a reputable site like MegaGames.com , which offers various fixes and patches for games. You can also check other sites like GameCopyWorld.com or SkidrowReloaded.com for alternative cracks.

-

Table: Comparison of different cracks for Far Cry 3 Blood Dragon

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Crack nameUploaderSizeDateFeaturesRatings
FAR.CRY.3.BD.V1.02.ALL.FTS.NODVD.ZIP FTS689 KBJuly 8, 2013Bypasses Ubisoft DRM; includes update v1.02; credits RELOADED for uplay wrapper4/5 stars; 174 votes; positive comments
FAR.CRY.3.BD.V1.02.ALL.RELOADED.NODVD.ZIP Reloaded689 KBJuly 9, 2013Bypasses Ubisoft DRM; includes update v1.02; uses own uplay wrapper4/5 stars; 98 votes; positive comments
FAR.CRY.3.BD.V1.0.ALL.RELOADED.NODVD.ZIP Reloaded689 KBMay 1, 2019Bypasses Ubisoft DRM; does not include update v1.02; uses own uplay wrapper4/5 stars; 67 votes; positive comments
FAR.CRY.3.BD.V1.0.ALL.FTS.NODVD.ZIP FTS689 KBMay 1,2019 Bypasses Ubisoft DRM; does not include update v1.02; credits RELOADED for uplay wrapper 4/5 stars; 54 votes; positive comments

Note: The table above is based on information available at MegaGames.com as of May 2020. The information may change over time.

-

Far Cry 3 Blood Dragon v1.02 No-DVD [FTS]
-Far Cry 3 Blood Dragon Update v1.02 [FTS]
-Far Cry 3 Blood Dragon v1.02 No-DVD [Reloaded]
-Far Cry 3 Blood Dragon v1.0 No-DVD [3DM]
-Far Cry 3 Blood Dragon Cyber Shooter Download
-Far Cry 3 Blood Dragon Free Download PC Game
-Far Cry 3 Blood Dragon Full Version Crack
-Far Cry 3 Blood Dragon Torrent Download
-Far Cry 3 Blood Dragon Skidrow Crack
-Far Cry 3 Blood Dragon Repack by FitGirl
-Far Cry 3 Blood Dragon Steam Unlocked
-Far Cry 3 Blood Dragon Uplay Crack
-Far Cry 3 Blood Dragon MegaGames Fix
-Far Cry 3 Blood Dragon PC Game Highly Compressed
-Far Cry 3 Blood Dragon Direct Download Link
-Far Cry 3 Blood Dragon Offline Activation
-Far Cry 3 Blood Dragon Keygen Generator
-Far Cry 3 Blood Dragon Serial Number
-Far Cry 3 Blood Dragon License Key
-Far Cry 3 Blood Dragon CD Key
-Far Cry 3 Blood Dragon Gameplay Walkthrough
-Far Cry 3 Blood Dragon Cheats and Hacks
-Far Cry 3 Blood Dragon Trainer and Mods
-Far Cry 3 Blood Dragon Save Game Editor
-Far Cry 3 Blood Dragon Tips and Tricks
-Far Cry 3 Blood Dragon Review and Rating
-Far Cry 3 Blood Dragon System Requirements
-Far Cry 3 Blood Dragon Patch Notes
-Far Cry 3 Blood Dragon Changelog.txt
-Far Cry 3 Blood Dragon How to Install Guide
-Far Cry 3 Blood Dragon Error Fix Solution
-Far Cry 3 Blood Dragon Black Screen Problem
-Far Cry 3 Blood Dragon Crash on Startup Issue
-Far Cry 3 Blood Dragon Not Launching Solution
-Far Cry 3 Blood Dragon Missing DLL Files Fix
-Far Cry 3 Blood Dragon No Sound Problem Solution
-Far Cry 3 Blood Dragon Low FPS Fix Solution
-Far Cry 3 Blood Dragon Lag Fix Solution
-Far Cry 3 Blood Dragon Stuttering Fix Solution
-Far Cry 3 Blood Dragon Controller Support Fix Solution
-Far Cry 3 Blood Dragon Keyboard and Mouse Fix Solution
-Far Cry 3 Blood Dragon Resolution and Graphics Settings Fix Solution
-Far Cry 3 Blood Dragon Multiplayer Crack Online Mode Fix Solution
-Far Cry 3 Blood Dragon Co-op Crack Online Mode Fix Solution
-Far Cry 3 Blood Dragon Steamworks Fix Online Mode Fix Solution
-Far Cry 3 Blood Dragon LAN Play Fix Solution
-Far Cry 3 Blood Dragon Split Screen Mode Fix Solution
-Far Cry 3 Blood Dragon VR Mode Fix Solution

[^1]: https://megagames.com/fixes/far-cry-3-blood-dragon-v102-all-no-dvd-fts
[^2]: https://megagames.com/download/316635/0
[^3]: https://skidrowreloaded.com/f

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Astro Vision Lifesign With Remedies 12.5 Free Download ((FREE)).rar.md b/spaces/1gistliPinn/ChatGPT4/Examples/Astro Vision Lifesign With Remedies 12.5 Free Download ((FREE)).rar.md deleted file mode 100644 index 9f03914b7a87cae24867c6fbaad643ff03401c14..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Astro Vision Lifesign With Remedies 12.5 Free Download ((FREE)).rar.md +++ /dev/null @@ -1,110 +0,0 @@ -
-

Astro Vision Lifesign with Remedies 12.5: A Comprehensive Software for Vedic Astrology

- -

If you are interested in Vedic astrology and want to get accurate horoscopes and predictions based on your birth details, you might want to try Astro Vision Lifesign with Remedies 12.5. This is a software that allows you to create and analyze your own horoscope, as well as get remedies for any problems or obstacles you might face in life.

- -

What is Astro Vision Lifesign with Remedies 12.5?

- -

Astro Vision Lifesign with Remedies 12.5 is a software that is based on the principles of Vedic astrology, also known as Jyotish or Hindu astrology. Vedic astrology is an ancient system of knowledge that uses the positions of the planets and stars at the time of your birth to reveal your personality, destiny, and karma.

-

astro vision lifesign with remedies 12.5 free download.rar


Download Ziphttps://imgfil.com/2uy095



- -

Astro Vision Lifesign with Remedies 12.5 can help you to generate your own horoscope, which is a graphical representation of the sky at the moment of your birth. The horoscope shows the placement of the 12 zodiac signs, the 9 planets, and the 27 lunar constellations or nakshatras in the 12 houses or bhavas. Each of these elements has a specific meaning and influence on your life.

- -

Astro Vision Lifesign with Remedies 12.5 can also help you to interpret your horoscope and get detailed predictions for various aspects of your life, such as career, education, marriage, health, wealth, family, etc. The software uses various methods of analysis, such as dasa system, ashtakavarga system, transit system, yogas, etc., to give you accurate and reliable results.

- -

What are the benefits of Astro Vision Lifesign with Remedies 12.5?

- -

Astro Vision Lifesign with Remedies 12.5 is not just a software for creating and reading horoscopes. It is also a software that can help you to overcome any difficulties or challenges you might face in life. The software can suggest remedies or parihara for any doshas or afflictions in your horoscope that might cause problems or delays in your life.

- -

The remedies are based on the principles of Vedic astrology and can include various types of solutions, such as mantras, yantras, gemstones, rudrakshas, donations, fasting, etc. The software can also recommend auspicious times or muhurthas for performing any important activities or events in your life.

- -

By using Astro Vision Lifesign with Remedies 12.5, you can get a better understanding of yourself and your life purpose. You can also get guidance and support for achieving your goals and fulfilling your potential.

-

- -

How to download Astro Vision Lifesign with Remedies 12.5 for free?

- -

If you want to try Astro Vision Lifesign with Remedies 12.5 for yourself, you can download it for free from the internet. However, you need to be careful about the source of the download, as some websites might offer fake or corrupted files that might harm your computer or compromise your privacy.

- -

One of the safest and easiest ways to download Astro Vision Lifesign with Remedies 12.5 for free is to use a file sharing platform like Peatix or SoundCloud. These platforms allow users to upload and download files without any restrictions or fees. You can find the link to download Astro Vision Lifesign with Remedies 12.5 for free from these platforms below:

- - - -

Once you have downloaded the file, you need to extract it using a software like WinRAR or 7-Zip. Then you need to install the software by following the instructions on the screen. You might need to enter a serial number or a crack code to activate the software.

- -

Conclusion

- -

Astro Vision Lifesign with Remedies 12.5 is a software that can help you to explore the fascinating world of Vedic astrology and get insights into your life and future. It can also help you to find solutions and remedies for any problems or obstacles you might encounter in life.

- -

If you want to download Astro Vision Lifesign with Remedies 12.5 for free, you can use the links provided above and enjoy the benefits of this software.

-

What are the features of Astro Vision Lifesign with Remedies 12.5?

- -

Astro Vision Lifesign with Remedies 12.5 is a software that has many features and options to suit your needs and preferences. Some of the features of this software are:

- - - -

What are the reviews of Astro Vision Lifesign with Remedies 12.5?

- -

Astro Vision Lifesign with Remedies 12.5 is a software that has received positive reviews from many users and experts. Some of the reviews of this software are:

- -
"I have been using Astro Vision Lifesign with Remedies 12.5 for a long time and I am very satisfied with it. It is very accurate and easy to use. It has helped me to understand myself better and to make better decisions in life. It has also helped me to find solutions and remedies for any problems I faced in life. I would recommend this software to anyone who is interested in Vedic astrology." - S S Gopalakrishnan
- -
"Astro Vision Lifesign with Remedies 12.5 is a comprehensive software for Vedic astrology. It has everything you need to create and analyze your own horoscope and get predictions and remedies for various aspects of your life. It is very user-friendly and customizable. It supports multiple languages and chart styles. It is also very affordable and reliable. I have been using this software for years and I have never been disappointed." - Rajesh Kumar
- -
"Astro Vision Lifesign with Remedies 12.5 is a software that can help you to explore the fascinating world of Vedic astrology and get insights into your life and future. It can also help you to find solutions and remedies for any problems or obstacles you might encounter in life. It is a software that can change your life for the better." - Priya Sharma
- -

Conclusion

- -

Astro Vision Lifesign with Remedies 12.5 is a software that can help you to create and analyze your own horoscope based on the principles of Vedic astrology. It can also help you to get predictions and remedies for various aspects of your life. It is a software that can help you to overcome any difficulties or challenges you might face in life.

- -

If you want to download Astro Vision Lifesign with Remedies 12.5 for free, you can use the links provided above and enjoy the benefits of this software.

-

How to use Astro Vision Lifesign with Remedies 12.5?

- -

Astro Vision Lifesign with Remedies 12.5 is a software that is easy to use and user-friendly. You can follow these simple steps to use this software:

- -
    -
  1. Download Astro Vision Lifesign with Remedies 12.5 for free from the links provided above and extract the file using a software like WinRAR or 7-Zip.
  2. -
  3. Install the software by following the instructions on the screen. You might need to enter a serial number or a crack code to activate the software.
  4. -
  5. Launch the software and enter your name, date of birth, time of birth, and place of birth. You can also choose your preferred language, chart style, ayanamsa, etc.
  6. -
  7. Click on the Generate Horoscope button and wait for a few seconds. The software will create your horoscope and display it on the screen.
  8. -
  9. Click on the different tabs and buttons to view various reports and predictions for your life. You can also get remedies or parihara for any doshas or afflictions in your horoscope.
  10. -
  11. Click on the Print or Save button to print or save your horoscope and reports for future reference.
  12. -
- -

Why choose Astro Vision Lifesign with Remedies 12.5?

- -

Astro Vision Lifesign with Remedies 12.5 is a software that has many advantages and benefits over other astrology software. Some of the reasons why you should choose this software are:

- - - -

Conclusion

- -

Astro Vision Lifesign with Remedies 12.5 is a software that can help you to create and analyze your own horoscope based on the principles of Vedic astrology. It can also help you to get predictions and remedies for various aspects of your life. It is a software that can help you to overcome any difficulties or challenges you might face in life.

- -

If you want to download Astro Vision Lifesign with Remedies 12.5 for free, you can use the links provided above and enjoy the benefits of this software.

-

Conclusion

- -

Astro Vision Lifesign with Remedies 12.5 is a software that can help you to create and analyze your own horoscope based on the principles of Vedic astrology. It can also help you to get predictions and remedies for various aspects of your life. It is a software that can help you to overcome any difficulties or challenges you might face in life.

- -

If you want to download Astro Vision Lifesign with Remedies 12.5 for free, you can use the links provided above and enjoy the benefits of this software.

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Athlean X Workout PDF Learn from the Expert Jeff Cavaliere.md b/spaces/1gistliPinn/ChatGPT4/Examples/Athlean X Workout PDF Learn from the Expert Jeff Cavaliere.md deleted file mode 100644 index 77521da2aa0dda90539f65338b32d29fdca038a0..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Athlean X Workout PDF Learn from the Expert Jeff Cavaliere.md +++ /dev/null @@ -1,5 +0,0 @@ -
full body workout, ATHLEAN-X workout.

These are exercises that lend themselves to increased strength at a faster pace and also help to coordinate multiple muscle groups into one action, making them more athletically based and functional. Given that science supports the idea that stimulating and re-stimulating a muscle every 48 hours produces the most muscle growth with the least wasted time, you will find these workouts taking advantage of this.

Read more

athlean x workout pdf


Download File >>>>> https://imgfil.com/2uy1Xn



\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Digikam Handbuch Deutsch Pdf 205.md b/spaces/1gistliPinn/ChatGPT4/Examples/Digikam Handbuch Deutsch Pdf 205.md deleted file mode 100644 index b58b081b714025a93780a756444a8b58ed141bdc..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Digikam Handbuch Deutsch Pdf 205.md +++ /dev/null @@ -1,93 +0,0 @@ - -

Digikam Handbuch Deutsch Pdf 205: A Comprehensive Guide for Photography Enthusiasts

Digikam is a powerful and versatile application for managing, editing, and presenting digital photos. With Digikam you can organize, rate, comment on, tag, search, and filter your images. You can also connect your camera to Digikam and import photos directly from it. Digikam additionally offers numerous tools and effects for improving your images, such as sharpening, color correction, exposure correction, red-eye removal, and much more. You can also print your images, send them by e-mail, or upload them to online galleries.

Digikam Handbuch Deutsch Pdf 205

Download Zip > https://imgfil.com/2uxXfa

To make use of all of Digikam's functions and possibilities, we recommend reading the Digikam Handbuch Deutsch Pdf 205. This handbook is a comprehensive and up-to-date documentation for Digikam that explains, step by step, how to set up and use the application. It also contains many tips and tricks for professionals who want to optimize their photos even further.

What you will find in the Digikam Handbuch Deutsch Pdf 205

The Digikam Handbuch Deutsch Pdf 205 is divided into four main parts.

In addition, the handbook contains an appendix with information on installing Digikam, as well as a list of figures with all the screenshots from the handbook.

How to read the Digikam Handbuch Deutsch Pdf 205

You have several ways to read the Digikam Handbuch Deutsch Pdf 205.

We hope you enjoy the Digikam Handbuch Deutsch Pdf 205 and that it helps you get the best out of your photos. If you have questions or suggestions, do not hesitate to contact us. We look forward to your feedback!

How to install and set up Digikam

To install Digikam, first download the appropriate version for your operating system. You can get Digikam for Windows, Linux, or Mac OS X from the official website. Then follow the on-screen instructions to complete the installation.

After installation you need to set up Digikam to manage your photos. First create an album in which to store your photos; you can create several albums for different topics or projects. To create an album, click the "New Album" icon in the album list or choose "Album -> New Album" from the menu. Then enter a name and a storage location for the album and click "OK".

Next, import your photos into the album. You can do this in several ways.

Once you have imported your photos into the album, you can sort, filter, and search them by various criteria. You can also add keywords, comments, ratings, and other metadata to your photos to organize and find them more easily.

How to use Digikam for image editing

Digikam offers a range of tools and effects for improving your photos. To edit a photo, select it in the album and double-click it, click the "Image Editor" icon in the toolbar, or choose "Image -> Image Editor" from the menu. This opens a new window with the photo and various editing options.

The image editor lets you perform a number of actions on the photo.

Once you have made your changes, save the photo by clicking the "Save" icon in the toolbar or choosing "File -> Save" from the menu. You can also save a copy of the photo or restore the original.

How to use Digikam to present your photos

Digikam also gives you several ways to present and share your photos. You can display your photos as a slideshow by clicking the "Slideshow" icon in the toolbar or choosing "Image -> Slideshow" from the menu. You can adjust settings such as the transition effects, the display duration, or the background music.

You can also export your photos as an HTML gallery by clicking the "HTML Gallery" icon in the toolbar or choosing "Tools -> HTML Gallery" from the menu. You can pick from various templates, colors, and fonts for your gallery and save it to your hard disk or upload it to a server via FTP.

You can also upload your photos to an online gallery such as Flickr, Google Photos, or Facebook by clicking the "Online Storage" icon in the toolbar or choosing "Tools -> Online Storage" from the menu. You first need to sign in with your account for the respective service and grant Digikam the necessary permissions. Then select the photos you want to upload and specify settings such as the title, description, keywords, or privacy.

How to update and extend Digikam

To keep Digikam up to date and benefit from the latest features and improvements, we recommend checking for updates regularly. Click the "Update" icon in the toolbar or choose "Settings -> Update" from the menu. Digikam will check whether a newer version is available and, if so, offer you a download link.

To extend Digikam with new features, you can install various plugins. Plugins are small extensions that provide additional tools or effects for Digikam. You can download plugins from the official website or install them from the "Settings -> Plugins" menu. To use a plugin, first activate it and then select the corresponding option in the menu or toolbar.

How to use Digikam to scan and convert your photos

Digikam also lets you scan and convert your analog photos. You can import photos from a flatbed scanner or a film scanner by clicking the "Scan" icon in the toolbar or choosing "Tools -> Scan" from the menu. You first need to connect your scanner to your computer and install the appropriate drivers.

After selecting your scanner, you can adjust scan settings such as the resolution, the color mode, or the scan area. You can also display a preview of the scan and check the image quality. When you are satisfied with the settings, start the scan and save the photo in an album of your choice.

You can also convert your photos from one file format to another by clicking the "Convert" icon in the toolbar or choosing "Tools -> Convert" from the menu. You can convert several photos at once and specify the target size, target directory, and target format. Digikam supports various file formats such as JPEG, PNG, TIFF, RAW, and PDF.
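The batch conversion described above is built into Digikam itself; purely as an illustration of the same idea outside Digikam, here is a minimal Python sketch using the Pillow library. The directory names and target format are placeholders, not anything prescribed by the handbook.

```python
# Minimal sketch of batch image conversion, independent of Digikam's own
# converter: walk a source directory and re-save each image in a target
# format using Pillow. Paths and the format choice are illustrative only.
from pathlib import Path

from PIL import Image

def batch_convert(src_dir: str, dst_dir: str, target_ext: str = "png") -> None:
    """Convert every JPEG/TIFF image under src_dir to target_ext in dst_dir."""
    out = Path(dst_dir)
    out.mkdir(parents=True, exist_ok=True)
    for path in Path(src_dir).iterdir():
        if path.suffix.lower() in {".jpg", ".jpeg", ".tif", ".tiff"}:
            with Image.open(path) as img:
                # Pillow infers the output format from the file extension.
                img.save(out / f"{path.stem}.{target_ext}")

if __name__ == "__main__":
    batch_convert("scans", "converted", "png")
```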

How to use Digikam to back up and restore your photos

Digikam also offers several ways to back up and restore your photos. You can burn your photos to a CD or DVD by clicking the "Burn" icon in the toolbar or choosing "Tools -> Burn" from the menu. You first need to insert a blank disc into your drive and adjust burn settings such as the speed, the file system, or the label.

You can also copy your photos to an external hard disk or a USB stick by clicking the "Copy" icon in the toolbar or choosing "Tools -> Copy" from the menu. You first need to connect your storage medium to your computer and select the destination.

You can also restore your photos from a backup by clicking the "Restore" icon in the toolbar or choosing "Tools -> Restore" from the menu. You first need to connect your backup medium to your computer and select the source. Then choose the photos you want to restore and specify the destination.

\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Burger Place Mod APK 0.15.0 Review Rating and Download Link.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Burger Place Mod APK 0.15.0 Review Rating and Download Link.md deleted file mode 100644 index dfc2eb92d713875bf9b79df8c354365200903108..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Burger Place Mod APK 0.15.0 Review Rating and Download Link.md +++ /dev/null @@ -1,119 +0,0 @@ -
The benefits and tips of playing solitaire

Playing solitaire is not only fun but also good for your mental health and well-being, and it offers several benefits.

To enjoy these benefits and have more fun playing solitaire, there are some tips and tricks to follow.

How to download and play solitaire on your device

If you want to play solitaire on your device, you will need to download a solitaire app that is compatible with your operating system. There are many solitaire apps available for free download on various platforms, such as Android, iOS, and Windows. Here are some of the best solitaire apps for Windows 10:

The best solitaire apps for Windows 10

Conclusion and frequently asked questions

What is the best free solitaire app?

The best free solitaire app depends on your personal taste and device compatibility. However, some of the most popular and highly rated free solitaire apps are Microsoft Solitaire Collection, 123 Free Solitaire, Full Deck Solitaire, Classic Solitaire Klondike, and Solitaire by MobilityWare.

What is the best paid solitaire app?

The best paid solitaire app also depends on your personal taste and device compatibility. However, some of the most popular and highly rated paid solitaire apps are SolSuite, BVS Solitaire Collection, Klondike Solitaire Collection, Card Shark Solitaire, and Solebon Pro.

How can I download solitaire apps?

To download solitaire apps on your device, visit the appropriate app store for your platform. For example, on an Android device visit the Google Play Store; on an iOS device visit the App Store; on a Windows device visit the Microsoft Store. Then search for the solitaire app you want and click the install or buy button. You may need to sign in with your account or enter your payment details if the app is not free. Once the app has downloaded, you can open it and start playing solitaire.

How can I update solitaire apps?

To update solitaire apps on your device, visit the same app store you used to download them. Then check whether updates are available for your apps and click the update button. You may need to sign in with your account or enter your payment details if the update is not free. Once the update has downloaded, you can open the app and enjoy the new features or fixes.

How can I delete solitaire apps?

I hope this article has helped you learn more about free solitaire download apps. Solitaire is a fun and relaxing game you can play anytime, anywhere. Whether you prefer classic Klondike solitaire or want to try some new solitaire variations, you can find an app that meets your needs. Download a solitaire app today and enjoy the classic card game.

\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Ataque En Titan Tributo Juego Versin Antigua.md b/spaces/Benson/text-generation/Examples/Ataque En Titan Tributo Juego Versin Antigua.md deleted file mode 100644 index f645ea138268c39c150f780b77cdbf455d2f75e5..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Ataque En Titan Tributo Juego Versin Antigua.md +++ /dev/null @@ -1,213 +0,0 @@ - -

Attack on Titan Tribute Game: How to Download and Play the Old Version

Are you a fan of Attack on Titan, the popular anime and manga series about humanity's fight against giant man-eating creatures? If so, you may have heard of Attack on Titan Tribute Game, a fan-made game that lets you experience the thrill of swinging with 3D maneuver gear and slicing Titans in various scenarios. But did you know there is an old version of this game that some players still prefer over the newer one? In this article, we will tell you everything you need to know about Attack on Titan Tribute Game, why you might want to play the old version, how to download and install it, how to play it, and how to update or uninstall it if you wish. Let's get started!

ataque en titan tributo juego versión antigua

Download Zip ⚙⚙⚙ https://bltlly.com/2v6J5y

What is Attack on Titan Tribute Game?

Attack on Titan Tribute Game is a tribute-game adaptation of the Attack on Titan manga series written and illustrated by Hajime Isayama. It is a derivative game created by the Chinese developer Feng Lee and is not officially affiliated with the Attack on Titan franchise. It was first released in 2013 as a browser-based game, but later became available as a standalone download for Windows, Mac, and Linux. The game follows the story of the anime series and puts players in control of their favorite characters from the show. As you fight enemies, collect items, level up, and unlock new abilities, you can see the story through the eyes (and mouth) of one of your chosen characters. The game features a cute chibi style that evokes much of the series' humor, and closely mimics the high-flying, cable-based combat most fans enjoy. The game is still in development and keeps adding new features and options on its official website.

Why would you want to play the old version?

The pros and cons of playing the old version versus the new version

How to download and install the old version of Attack on Titan Tribute Game

If you decide to play the old version of Attack on Titan Tribute Game, you will need to download and install it on your computer. Here are the steps to do so:

The steps to download the game from FileHippo or Archive.org

  1. Go to [FileHippo] or [Archive.org] and search for "Attack on Titan Tribute Game".
  2. Find the version you want to download. The latest old version is 01042015, released on April 1, 2015. You can also choose earlier versions if you prefer.
  3. Click the download button and save the file to your computer. The file size is about 21 MB.

The steps to install and run the game on Windows

  1. Locate the downloaded file on your computer and unzip it using a program such as WinRAR or 7-Zip (a scripted alternative is sketched after this list).
  2. Open the unzipped folder and double-click the file named "Attack on Titan Tribute Game.exe".
  3. Allow the game to run on your computer by clicking "Yes" or "Run" if a security warning appears.
  4. Wait for the game to load and enjoy playing!
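If you prefer to script the unzip step rather than use WinRAR or 7-Zip, here is a minimal Python sketch using only the standard library. The archive name is a placeholder, not the actual download name.

```python
# Minimal sketch: extract the downloaded game archive with Python's standard
# library instead of WinRAR/7-Zip. The archive name is illustrative.
import zipfile
from pathlib import Path

archive = Path("Attack on Titan Tribute Game.zip")  # hypothetical file name
target = Path("AoTTG_old")

with zipfile.ZipFile(archive) as zf:
    zf.extractall(target)

print(f"Extracted {len(list(target.rglob('*')))} files to {target}")
```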

How to play the old version of Attack on Titan Tribute Game

Now that you have downloaded and installed the old version of Attack on Titan Tribute Game, you are ready to play. Here are some tips on how to play:

The basic controls and game mechanics

The game is played with a keyboard and a mouse. The keyboard is used to move, jump, dodge, attack, reload, switch weapons, and activate special abilities. The mouse is used to aim, swing, hook, and control the camera. You can customize the key bindings in the options menu if you wish.

The game has different difficulty levels that affect the number, size, speed, and intelligence of the Titans. You can also adjust the Titans' damage, health, spawn rate, and respawn time in the options menu. You can likewise enable or disable friendly fire, collisions, punk Titans, aberrant Titans, crawler Titans, Female Titans, Colossal Titans, Armored Titans, Annie's crystal ability, Eren's Titan ability, Levi's special attack, Mikasa's special attack, Armin's special attack, Jean's horse-summoning ability, Marco's item-dropping ability, Petra's gas-saving ability, Sasha's meat-eating ability, Annie's ring-blade ability, and Erwin's flare-gun ability.

The available characters, maps, and modes

The game has 10 characters to choose from, each with their own appearance, stats, and abilities. You can also customize your character's name, outfit, hair, eyes, skin, and blade color in the options menu. [The original character table did not survive extraction; Sasha was among the listed characters.]

The game has 5 maps you can play on, each with its own layout, environment, and challenges. You can also create your own custom maps using the map editor in the options menu. [The original map table did not survive extraction; Outside Wall Maria was among the listed maps.]

The game has different modes you can play, each with its own objectives, rules, and challenges. You can also create your own custom modes using the mode editor in the options menu. [The original mode table did not survive extraction; the listed modes included Hard mode, Boss mode, and Wave mode.]

How to update or uninstall the old version of Attack on Titan Tribute Game

If you want to update or uninstall the old version of Attack on Titan Tribute Game, you can follow these steps:

The steps to update the game to the latest version or switch to AoTTG 2

  1. Go to the official Attack on Titan Tribute Game website and download the latest version of the game, or AoTTG 2, a sequel to the original game with improved graphics and gameplay.
  2. Unzip the downloaded file.
  3. Open the unzipped folder and double-click the file named "Attack on Titan Tribute Game.exe" or "AoTTG 2.exe".
  4. Allow the game to run on your computer by clicking "Yes" or "Run" if a security warning appears.
  5. Wait for the game to load and enjoy playing!

The steps to uninstall the game from your computer

  1. Find the folder where you installed the old version of Attack on Titan Tribute Game on your computer.
  2. Delete the folder and all its contents.
  3. Empty your recycle bin to free up space on your computer.
  4. You have now successfully uninstalled the game from your computer.

Conclusion

In this article, we have shown you how to download and play the old version of Attack on Titan Tribute Game, a fan-made game based on the popular anime and manga series. We have also explained why some players prefer the old version over the new one, how to play the game with different characters, maps, and modes, and how to update or uninstall the game if you wish. We hope you have enjoyed this article and learned something new. If you are a fan of Attack on Titan, we recommend trying this game and experiencing the thrill of fighting Titans with your 3D maneuver gear. You can also check out other Attack on Titan games, such as AoTTG 2, Attack on Titan 2: Final Battle, Attack on Titan: Wings of Freedom, and more. Thanks for reading, and have fun!

Frequently asked questions

Here are some frequently asked questions about Attack on Titan Tribute Game and their answers:

  1. Is Attack on Titan Tribute Game free?

    Yes, Attack on Titan Tribute Game is free to download and play. You do not need to pay anything or register an account to play it. However, you can support the developer by donating via PayPal or Patreon if you wish.

  2. Is Attack on Titan Tribute Game safe?

  3. Is Attack on Titan Tribute Game multiplayer?

    Yes, Attack on Titan Tribute Game is multiplayer. You can play with other players online or locally. To play online, you need to join a server or create your own. You can find servers on the official website or on other websites such as [AoTTG Hub] or [AoTTG Reddit]. To play locally, you need to connect your computers using a LAN cable or a Wi-Fi network. You can also play with bots if you prefer.

  4. How do I mod Attack on Titan Tribute Game?

    Modding Attack on Titan Tribute Game is possible, but not easy. You need some knowledge of programming and game development, and tools such as Unity, Blender, Photoshop, and more. You can find tutorials and guides on modding the game on websites such as [AoTTG Modding] or [AoTTG Modding Forum]. You can also download and use mods created by other players on websites such as [AoTTG Mods] or [AoTTG Mods Database]. However, be careful when using mods, as some of them may be incompatible, unstable, or malicious.

  5. How do I fix Attack on Titan Tribute Game not working?

    If Attack on Titan Tribute Game does not work on your computer, you can try some of these solutions:

    - Make sure you have the latest version of the game and update it if necessary.
    - Make sure you have the latest version of your operating system and update it if necessary.
    - Make sure you have the latest version of your graphics card driver and update it if necessary.
    - Make sure you have the latest version of your browser and update it if necessary.
    - Make sure you have the latest version of Adobe Flash Player and update it if necessary.
    - Make sure you have a stable internet connection and check your firewall settings.
    - Make sure you have enough disk space and memory on your computer and delete any unnecessary files or programs.
    - Make sure you have no mods or hacks that could interfere with the game and remove them if necessary.
    - Make sure no other programs are conflicting with the game and close them if necessary.
    - Restart your computer and try running the game again.

    If none of these solutions work, you can contact the developer or other players for help on the official website or on other websites such as [AoTTG Support] or [AoTTG Discord].

    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Bus Simulator Indonesia Mod Apk Obb.md b/spaces/Benson/text-generation/Examples/Bus Simulator Indonesia Mod Apk Obb.md deleted file mode 100644 index 22c7699942a912b9320f62ec43d509e9b91a3a61..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Bus Simulator Indonesia Mod Apk Obb.md +++ /dev/null @@ -1,76 +0,0 @@ -
Bus Simulator Indonesia Mod Apk + OBB: A Fun and Realistic Game for Bus Lovers

Introduction

Do you love driving buses and exploring different places in Indonesia? If so, you should try Bus Simulator Indonesia, a popular mobile game that lets you experience the thrill and challenge of being a bus driver in Indonesia. But wait, there's more! You can also enjoy the game with extra features and options by downloading Bus Simulator Indonesia Mod Apk + OBB, a modified version of the game that gives you unlimited money, gold, vehicles, and more. In this article, we will tell you everything you need to know about Bus Simulator Indonesia Mod Apk + OBB, including its features, how to download and install it, and some frequently asked questions. Let's get started!

What is Bus Simulator Indonesia?

Bus Simulator Indonesia, or BUSSID for short, is a mobile game developed by Maleo that simulates the life of a bus driver in Indonesia. You can choose your favorite bus from a variety of models and designs, and drive it through different cities and regions of Indonesia. You can also pick up passengers, follow the traffic rules, honk your horn, and enjoy the scenery along the way. The game has realistic 3D graphics and sound effects that make you feel as if you were really driving a bus in Indonesia.

bus simulator indonesia mod apk + obb

Download Zip ✒ ✒ ✒ https://bltlly.com/2v6Msi

What is Bus Simulator Indonesia Mod Apk + OBB?

Features of Bus Simulator Indonesia Mod Apk + OBB

Unlimited money and gold

One of the best features of Bus Simulator Indonesia Mod Apk + OBB is that it gives you unlimited money and gold, which you can use to buy anything you want in the game. You don't have to worry about running out of cash or saving up for expensive items. You can simply buy new buses, vehicles, skins, accessories, and so on, without any limitation. You can also upgrade your buses and vehicles to improve their performance and appearance.

Customizable buses and vehicles

Another great feature of Bus Simulator Indonesia Mod Apk + OBB is that it lets you customize your buses and vehicles to your taste. You can change their colors, shapes, logos, decals, and stickers to make them look unique and stylish. You can also choose from a variety of models and designs based on real buses and vehicles in Indonesia, and even create your own designs using the editing tool built into the game.

Realistic graphics and sound effects

Bus Simulator Indonesia Mod Apk + OBB also has realistic graphics and sound effects that make the game more immersive and enjoyable. You can see the details of the buses, vehicles, buildings, roads, and landscapes in the game, and hear the sounds of the engine, horn, brakes, passengers, traffic, and more. The game also has realistic physics and animations that make the driving experience more lifelike and fun.

Various modes and locations

Easy controls and camera options

Bus Simulator Indonesia Mod Apk + OBB also has easy controls and camera options that make the game simple and convenient to play. You can control your bus using the steering wheel, buttons, or tilt-screen options, and adjust the camera angle and view for comfort and visibility. You can switch between first-person and third-person views, or use the rear-view mirror or dashboard camera to see what is behind or in front of you.

How to download and install Bus Simulator Indonesia Mod Apk + OBB

Download the mod Apk and OBB files

The first step in downloading and installing Bus Simulator Indonesia Mod Apk + OBB is to download the mod apk and obb files from a reliable source. You can use the link below to download the latest version of the mod apk and obb files for free.

Download Bus Simulator Indonesia Mod Apk + OBB

Enable unknown sources on your device

The next step is to enable unknown sources on your device. This is needed to allow your device to install apps from sources other than the Google Play Store. To do this, follow these steps:

  1. Go to your device settings and look for the security or privacy options.
  2. Find the option that says unknown sources, or allow installation of apps from unknown sources.
  3. Turn this option on.

Install the mod Apk file

The third step is to install the mod apk file on your device. To do this, follow these steps:

  1. Locate the downloaded mod apk file in your device storage or file manager.
  2. Tap the file and select install.
  3. Wait for the installation process to complete.

Extract and copy the OBB file to the Android/OBB folder

The fourth step is to extract the obb file and copy it to the Android/OBB folder on your device. To do this, follow these steps (a scripted variant for doing this from a computer is sketched after the list):

  1. Tap the file and select extract.
  2. Choose a destination folder where you want to extract the file.
  3. After extracting, you will see a folder named com.maleo.bussimulatorid.
  4. Copy this folder and paste it into the Android/OBB folder in your device storage.
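If you are doing this from a computer rather than on the phone itself, the same extract-and-copy step can be scripted. This is only a minimal sketch of the idea; the archive name and the mount path are placeholders that depend on how your device is connected.

```python
# Minimal sketch of the extract-and-copy step done from a computer: unzip the
# downloaded OBB archive and copy the com.maleo.bussimulatorid folder into the
# device's Android/obb directory. The archive name and mount path are
# illustrative only.
import shutil
import zipfile
from pathlib import Path

obb_archive = Path("bussid_obb.zip")                     # hypothetical name
work_dir = Path("obb_extracted")
device_obb = Path("/run/media/user/PHONE/Android/obb")   # hypothetical mount

with zipfile.ZipFile(obb_archive) as zf:
    zf.extractall(work_dir)

src = work_dir / "com.maleo.bussimulatorid"
shutil.copytree(src, device_obb / src.name, dirs_exist_ok=True)
print(f"Copied {src.name} to {device_obb}")
```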

Launch the game and enjoy

The final step is to launch the game and enjoy it. To do this, follow these steps:

  1. Go to your app drawer or home screen and look for the Bus Simulator Indonesia icon.
  2. Tap the icon to launch the game.
  3. Grant any permissions or access the game may ask for.
  4. Select your preferred language and settings.
  5. Start playing and have fun!

Conclusion

Bus Simulator Indonesia Mod Apk + OBB is a fun and realistic game that lets you experience the life of a bus driver in Indonesia. You can enjoy unlimited money, gold, vehicles, customization options, realistic graphics, sound effects, physics, animations, modes, locations, controls, camera options, and more in this modified version of the game. You can also download and install it easily by following our simple guide above. So what are you waiting for? Download Bus Simulator Indonesia Mod Apk + OBB now and enjoy driving buses in Indonesia!

Frequently asked questions

Here are some frequently asked questions about Bus Simulator Indonesia Mod Apk + OBB:

  1. Is Bus Simulator Indonesia Mod Apk + OBB safe to download and install?

    Yes, Bus Simulator Indonesia Mod Apk + OBB is safe to download and install as long as you get it from a trusted source. We have tested the mod apk and obb files and found them free of viruses, malware, or other harmful content. However, you should always be careful when downloading and installing modified apps or games, as they can contain unwanted or malicious code that can damage your device or data.

  2. Do I need to root my device to use Bus Simulator Indonesia Mod Apk + OBB?

    No, you do not need to root your device to use Bus Simulator Indonesia Mod Apk + OBB. The mod apk and obb files work fine on both rooted and non-rooted devices. However, some features or options may require root access to work properly, such as changing the IMEI number or the device ID. If you want to use those features, you may need to root your device first.

  3. Can I play online with other players?

    Yes, you can play Bus Simulator Indonesia Mod Apk + OBB online with other players as long as you have a stable internet connection and a compatible device. You can join or create multiplayer rooms and invite your friends or other players to join you. You can also chat with them and share your driving skills and experiences. Keep in mind, however, that some players may not be using the modified version of the game, and they may report or exclude you from their rooms if they find out you are using cheats or hacks.

  4. Will I get banned from the game if I use Bus Simulator Indonesia Mod Apk + OBB?

    There is a low chance of getting banned from the game if you use Bus Simulator Indonesia Mod Apk + OBB, as long as you use it wisely and responsibly. Do not abuse the mod's features or options, such as using too much money or gold, driving recklessly or dangerously, or causing trouble for other players. You should also avoid updating the game from the Google Play Store or any other source, as this can overwrite the mod apk and obb files and cause errors or crashes. If you want to update the game, wait for the latest version of the mod apk and obb files to be released and download them from the same source.

  5. How can I contact the developer of Bus Simulator Indonesia Mod Apk + OBB?

    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Coche De La Calle Apk Sin Verificacin.md b/spaces/Benson/text-generation/Examples/Coche De La Calle Apk Sin Verificacin.md deleted file mode 100644 index cdf45b1cd734f872a7454e5ffdf657eb25e478b3..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Coche De La Calle Apk Sin Verificacin.md +++ /dev/null @@ -1,77 +0,0 @@ -

CarX Street APK Without Verification: How to Download and Play the Latest Street Racing Game

If you are a fan of racing games, you may have heard of CarX Street, a realistic and exciting street racing game that was released on mobile devices before PC. The game offers an open world where you can explore, race, drift, and customize your car. However, the game is not available in all regions, and you need to verify your account and e-mail to play it. That is why you might be looking for CarX Street APK without verification, a modified version of the game that lets you play without restrictions. In this article, we will tell you everything you need to know about it, including what it is, why you might need it, how to download and install it, how to play it, and some tips and tricks to help you become a street racing legend.

What is CarX Street APK?

CarX Street APK is an application file that contains the game data of CarX Street, a simulation racing video game developed by CarX Technologies. The game is built on the CarX Technology engine, which simulates the behavior of cars on the road, giving players a realistic racing experience. The game's features include:

coche de la calle apk sin verificación

Download File: https://bltlly.com/2v6J4z

• A realistic and exciting racing game with an open world

  You can get behind the wheel and explore the big city and its surroundings, from busy downtown streets to winding mountain roads and mesmerizing coastal highways. You can also drift, speed through traffic, and challenge other players in real network races.

• A variety of cars, tracks, modes, and customization options

• A free game with regular updates and new content

  You can download and play CarX Street for free on your Android or iOS device. The game requires no subscription or registration, and you can enjoy it without ads or in-app purchases. It is constantly updated with new cars, tracks, modes, features, and events. You can also join the CarX Street community and share your feedback, suggestions, and ideas with the developers and other players.

Why do you need CarX Street APK without verification?

CarX Street is an amazing game you should not miss. However, there are some drawbacks that might keep you from enjoying it fully. That is why you might need CarX Street APK without verification, a modified version of the game that solves these problems. Here are some reasons you might need it:

• To bypass region restrictions and access the game from anywhere

  CarX Street is not available in all countries and regions. Depending on where you live, you may not be able to download or play the game from the official app stores, because of licensing issues, legal regulations, or other reasons. With the unverified APK, however, you can access the game from anywhere in the world. You do not need to use a VPN or change your location settings; you only need to download and install the APK file and enjoy the game.

• To avoid the hassle of verifying your account and e-mail

• To enjoy the game without ads or in-app purchases

  CarX Street is a free game with no ads or in-app purchases. However, some players may still encounter pop-ups or banners promoting other games or products. These ads can be annoying and distracting, especially while you are racing or drifting. With the unverified APK, you can enjoy the game without any ads or interruptions. You can also get unlimited money and resources to buy and upgrade any car you want.

How to download and install CarX Street APK without verification

Now that you know why you might need CarX Street APK without verification, you may be wondering how to download and install it on your device. It is actually very easy and simple; just follow these steps:

  1. Find a reliable and safe source for the APK file

     The first thing to do is find a trustworthy website that offers the unverified CarX Street APK for download. Many websites claim to provide this file, but not all of them are safe or legitimate. Some may contain viruses, malware, spyware, or other harmful software that can damage your device or steal your data. To avoid this risk, only download the file from a reputable source with positive reviews and ratings from other users.

  2. Enable installation from unknown sources on your device

     The next thing to do is enable installation from unknown sources on your device. This is a security setting that prevents you from installing apps from outside the official app stores. Since the unverified APK does not come from the official stores, you need to enable this setting to install it. To do so, go to your device settings > security > unknown sources > toggle on.

  3. Download and install the APK file and launch the game

     The last thing to do is download and install the APK file on your device. Go to the website hosting the file and click the download button, then wait for the file to download. Next, open your file manager or downloads folder, locate the APK file, tap it, and follow the instructions to install it (an adb-based alternative is sketched after this list). Once the installation is complete, you can launch the game from your app drawer or home screen.
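If you have a computer at hand, the install step can also be done over USB with adb instead of a file manager. This alternative is not part of the original guide; the sketch below assumes adb is installed, USB debugging is enabled, and the file name is a placeholder.

```python
# Minimal sketch of sideloading an APK from a computer with adb. Assumes adb
# is on PATH and USB debugging is enabled; the file name is illustrative.
import subprocess

APK = "carx_street_unverified.apk"  # hypothetical file name

def sideload(apk_path: str) -> None:
    """Install an APK on the first connected Android device via adb."""
    # 'adb install -r' replaces the app if it is already installed.
    result = subprocess.run(
        ["adb", "install", "-r", apk_path],
        capture_output=True, text=True, check=False,
    )
    print(result.stdout or result.stderr)

if __name__ == "__main__":
    sideload(APK)
```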

How to play CarX Street APK without verification

After downloading and installing CarX Street APK without verification on your device, you are ready to play. Here are some steps to help you get started:

  1. Follow the tutorial and learn the basics of the game

     When you launch the game for the first time, you are greeted by a tutorial that teaches you the basics, such as how to control your car, how to drift, how to race, and how to use the menu. Follow the tutorial carefully and pay attention to the tips and instructions. It also grants some rewards, such as money and cars, that will help you later in the game.

  2. Choose your car and customize it to your liking

     After finishing the tutorial, you can choose your first car from the garage. You can pick from different categories, such as street, sport, muscle, classic, or exotic, and customize your car with various parts, paints, decals, and stickers. You can change the color, wheels, body kit, spoiler, hood, exhaust, lights, and more. You can also tune your car's performance, including engine power, torque, weight, suspension, brakes, and tires.

  3. Join clubs, race against other players, and conquer the city

Tips and tricks for CarX Street APK without verification

CarX Street APK without verification is a fun and addictive game that will keep you entertained for hours. However, it can also be challenging and competitive, especially when you face other players or clubs. To help you improve your skills and enjoy the game more, here are some tips and tricks you should know:

• Roam the city and collect rewards

  One of the best things about the game is that you can roam the city freely and explore its secrets. You can find hidden places, shortcuts, jumps, ramps, and other surprises that make driving more fun and exciting. You can also collect rewards scattered around the city, such as money, parts, and cars, and use them to upgrade your garage and buy new vehicles.

• Take part in sprints and drifts for extra money

  Another way to earn more money is to take part in sprints and drifts. Sprints are short races that test your speed and agility; drifts are long runs that test your control and technique. You can find sprints and drifts in different places on the map and join them by driving up to them or tapping them in the menu. You earn extra money by winning them or by achieving high scores.

• Upgrade your car's parts and swap engines for better performance

Conclusion

CarX Street APK without verification is a great way to enjoy the latest street racing game without limitations. The game offers realistic physics, stunning graphics, and exciting gameplay that will make you feel like a real racer. You can download and install the APK file from a reliable source and start racing today.

      -

Frequently asked questions

• Is CarX Street APK without verification safe to use?

  Yes, it is safe to use as long as you download it from a reliable and secure source. However, you should always be careful when downloading any APK file from unknown sources, as some may contain harmful software or malware. You should also scan the file with an antivirus program before installing it on your device.

• Is CarX Street APK without verification compatible with all devices?

  No, it is not compatible with all devices. The game requires Android 5.0 or higher, or iOS 10.0 or higher, to run smoothly, as well as at least 2 GB of RAM and 4 GB of free storage space on your device. If your device does not meet these requirements, you may experience lag, crashes, or other problems while playing.

• How do I update CarX Street APK without verification?

  To update it, download the latest version of the APK file from the same source where you downloaded the previous version. Then uninstall the old version of the game from your device and install the new APK file. You do not need to worry about losing your progress or data, as they are saved on your device.

• How do I contact the support service for CarX Street APK without verification?

• How do I remove CarX Street APK without verification from my device?

  If you want to remove it from your device, follow these steps:

  1. Go to your device settings > apps > CarX Street > uninstall.
  2. Go to your file manager and delete the APK file and any other related files or folders.
  3. Restart your device to clear the cache and memory.

  Removing CarX Street APK without verification also deletes your progress and data in the game. If you want to keep them, make a backup before deleting the game.

I hope this article has helped you learn more about CarX Street APK without verification and how to download and play it. If you have any comments or suggestions, please let me know in the comments below. Thank you for reading, and happy racing!

      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Cuando Se Agot El Tiempo 1980 Descarga Gratuita.md b/spaces/Benson/text-generation/Examples/Cuando Se Agot El Tiempo 1980 Descarga Gratuita.md deleted file mode 100644 index 4ff0031c0cf4f67fc0e8f4cb6bc6df0cf8fcfc11..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cuando Se Agot El Tiempo 1980 Descarga Gratuita.md +++ /dev/null @@ -1,49 +0,0 @@ - -

When Time Ran Out: A Classic 1980 Disaster Movie

If you are a fan of disaster movies, you may have heard of When Time Ran Out, a 1980 film directed by James Goldstone and starring Paul Newman, Jacqueline Bisset, and William Holden. The movie is about a group of tourists trapped on a remote Pacific island threatened by an active volcano. It was produced by Irwin Allen, known for disaster movies such as The Poseidon Adventure and The Towering Inferno. Unlike his earlier hits, however, When Time Ran Out was a commercial failure and a critical flop. It is often regarded as the last disaster movie of the 1970s cycle and one of the worst films ever made.

cuando se agotó el tiempo 1980 descarga gratuita

DOWNLOAD ✵✵✵ https://bltlly.com/2v6JZi

Still, despite its negative reputation, When Time Ran Out has some redeeming qualities that might appeal to some viewers. It has a star-studded cast, some spectacular special effects, and a thrilling premise. If you are curious about this movie and want to watch it for yourself, you may be wondering how to download it for free and legally. In this article, we will tell you what When Time Ran Out is about, how it was received, and how you can download it from some of the best free movie download sites.

      -

What is When Time Ran Out?

The plot

The movie is set on a fictional island called Kalaleu, where Shelby Gilmore (William Holden) owns a newly built hotel. He wants to marry his secretary, Kay Kirby (Jacqueline Bisset), who is in love with Hank Anderson (Paul Newman), an oil driller whose scientists warn him that the island's volcano, Mauna Lani, is about to erupt. Shelby's partner, Bob Spangler (James Franciscus), assures the hotel guests that the volcano threat is exaggerated, explaining that it only erupts once every thousand years.

The group faces various obstacles and dangers on its journey across the island, such as lava flows, landslides, explosions, and collapsing bridges. They also have to deal with their personal conflicts and dilemmas. Will they reach safety before time runs out?

The cast

When Time Ran Out features a star-filled cast of actors who were famous or on the rise at the time. Paul Newman was one of Hollywood's most popular and respected actors, having starred in classics such as Butch Cassidy and the Sundance Kid, The Sting, and Cool Hand Luke. Jacqueline Bisset was a British actress who had appeared in films such as Bullitt, Murder on the Orient Express, and The Deep. William Holden was an Oscar-winning actor who had been in films such as Sunset Boulevard, Stalag 17, and Network.

The supporting cast includes James Franciscus, Ernest Borgnine, Red Buttons, Burgess Meredith, Valentina Cortese, Veronica Hamel, Pat Morita, Edward Albert, and Barbara Carrera. Some of them had worked with Irwin Allen before on his earlier disaster films, such as The Poseidon Adventure and The Towering Inferno. Some were also known for roles in other genres, such as Pat Morita in The Karate Kid, Edward Albert in Butterflies Are Free, and Barbara Carrera in Never Say Never Again.

The reception

Some viewers have nevertheless found some enjoyment in watching When Time Ran Out, whether as a guilty pleasure or as a campy cult classic. Some have praised the film's cast, its action sequences, and its nostalgic value. Some have also compared it favorably with other disaster movies of the era, such as Meteor and The Swarm. The film has a rating of 4.6/10 on IMDb, based on 3,508 votes.

      -

How to download When Time Ran Out for free and legally

If you are interested in watching When Time Ran Out for yourself, you may be wondering how to download it for free and legally. Fortunately, a few websites offer free downloads of movies that are in the public domain or have been uploaded with the rights holders' permission. Here are some of the best options for downloading When Time Ran Out:

Internet Archive

Internet Archive is a non-profit digital library that preserves and provides access to millions of books, movies, music, software, and other media. It has a large collection of public domain and Creative Commons-licensed films you can download for free. You can find When Time Ran Out on the Internet Archive by following this link: . You can choose from several formats and resolutions, such as MP4, MPEG2, OGG Video, and 512Kb MPEG4. You can also stream the movie online or read comments from other users.
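Since the article's actual download link did not survive extraction, the URL in the sketch below is a placeholder; it only illustrates how such a direct download could be scripted with the standard library once you have a real file URL.

```python
# Minimal sketch of scripting the download step: stream a file from a URL to
# disk with Python's standard library. The URL is a placeholder.
import urllib.request

URL = "https://archive.org/download/<item-id>/<file>.mp4"  # hypothetical
OUT = "when_time_ran_out.mp4"

def download(url: str, path: str) -> None:
    """Stream url to path in 1 MiB chunks so large videos fit in memory."""
    with urllib.request.urlopen(url) as resp, open(path, "wb") as out:
        while chunk := resp.read(1024 * 1024):
            out.write(chunk)

if __name__ == "__main__":
    download(URL, OUT)
```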

Public Domain Movies

Public Domain Movies is another website that offers free downloads of movies in the public domain. It has a simple, user-friendly interface that lets you browse by genre, by year, or alphabetically. You can find When Time Ran Out on Public Domain Movies by following this link: . You can download the movie in MP4 format or watch it online. You can also see the movie's poster, synopsis, and IMDb rating.

Other options

Burger Place Mod APK 0.15.0: A Fun and Addictive Cooking Game

-

Do you love burgers? Do you want to run your own burger shop? Do you want to have unlimited money and no ads while playing? If you answered yes to any of these questions, then you should download Burger Place Mod APK 0.15.0 right now!

-

Burger Place Mod APK 0.15.0 is a modified version of the original game Burger Please, which is a fun and addictive cooking game where you have to make burgers for your hungry customers. You can choose from different ingredients, toppings, sauces, breads, and sides to create your own unique burger recipes.

-

burger place mod apk 0.15.0


Download Zip 🆓 https://urlin.us/2uSS0c



-

But that's not all! With Burger Place Mod APK 0.15.0, you can also enjoy some amazing features that will make your gameplay more enjoyable and rewarding. Here are some of the features of Burger Place Mod APK 0.15.0:

-

Features of Burger Place Mod APK 0.15.0

-

Unlimited money to buy ingredients and upgrades

-

With Burger Place Mod APK 0.15.0, you don't have to worry about running out of money to buy ingredients and upgrades for your burger shop. You can start with a huge amount of money and spend it as you wish. You can buy more ingredients, upgrade your equipment, expand your menu, and decorate your shop to attract more customers.

-

No ads to interrupt your gameplay

-

Another great feature of Burger Place Mod APK 0.15.0 is that it removes all the annoying ads that pop up in the original game. You can play without any interruptions or distractions from ads. You can focus on making burgers and satisfying your customers.

-

Various levels and challenges to test your skills

-

Burger Place Mod APK 0.15.0 also offers you various levels and challenges to test your skills as a burger chef. You can serve different types of customers with different preferences and personalities. You can also face different scenarios and situations that will require you to think fast and act smart. You can earn stars and coins for completing each level and challenge.

-

Customizable burger shop and character

-

Another fun feature of Burger Place Mod APK 0.15.0 is that it allows you to customize your burger shop and character. You can choose from different styles and themes for your shop, such as retro, modern, or futuristic. You can also change the appearance of your character, such as the hair, clothes, and accessories.

-

Easy controls and graphics

-

Burger Place Mod APK 0.15.0 also has easy controls and graphics that make the game simple and enjoyable to play. You can use the touch screen to drag and drop ingredients, swipe to serve customers, and tap to collect money. The game also has colorful and cartoonish graphics that make the game look appealing and lively.

-

How to Download and Install Burger Place Mod APK 0.15.0

-

If you are interested in downloading and installing Burger Place Mod APK 0.15.0, you can follow these simple steps:

-

Download the APK file from a trusted source

-

The first step is to download the APK file from a trusted source, such as this link. Make sure you have enough storage space on your device before downloading the file.

-

Enable unknown sources on your device

-

The next step is to enable unknown sources on your device, which will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on.
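
Note that the exact menu path varies by Android version: since Android 8.0 (Oreo), the "unknown sources" permission is granted per app under Settings > Apps > Special app access > Install unknown apps, rather than as one global toggle. For the technically curious, here is a small Kotlin sketch of how an app can send a user to the right settings screen (the helper name is hypothetical and not part of any app mentioned here):

```kotlin
import android.content.Context
import android.content.Intent
import android.net.Uri
import android.os.Build
import android.provider.Settings

// Hypothetical helper: opens the settings screen where the user can allow
// APK installs. On Android 8.0+ (API 26) the permission is granted per
// requesting app; on older versions it is the global "Unknown sources" toggle.
fun openUnknownSourcesSettings(context: Context) {
    val intent = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
        Intent(
            Settings.ACTION_MANAGE_UNKNOWN_APP_SOURCES,
            Uri.parse("package:${context.packageName}")
        )
    } else {
        Intent(Settings.ACTION_SECURITY_SETTINGS)
    }
    // Needed when starting an activity from a non-Activity context.
    intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
    context.startActivity(intent)
}
```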

-

Install the APK file and launch the game

-

The final step is to install the APK file and launch the game. To do this, locate the downloaded file in your file manager or downloads folder, then tap on it to install it. Once the installation is done, you can launch the game from your app drawer or home screen.

-

Enjoy your unlimited money and no ads

-

Now you can enjoy your unlimited money and no ads while playing Burger Place Mod APK 0.15.0. You can buy ingredients and upgrades, serve customers, complete levels and challenges, customize your shop and character, and have fun making burgers.

-

Tips and Tricks for Playing Burger Place Mod APK 0.15.0

-

If you want to master Burger Place Mod APK 0.15.0, you can use these tips and tricks:

-

Upgrade your ingredients and equipment regularly

-

One of the best ways to improve your gameplay is to upgrade your ingredients and equipment regularly. This will allow you to make better burgers, serve more customers, earn more money, and unlock more items.

-

Serve your customers quickly and accurately

-

Another important tip is to serve your customers quickly and accurately. This will increase their satisfaction level, which will affect their tips and ratings. You can also earn bonuses for serving customers in a row without any mistakes.

-

Use boosters and power-ups wisely

-

Another useful tip is to use boosters and power-ups wisely. These are special items that can help you in different ways, such as speeding up your cooking time, freezing the customer's patience level, or doubling your earnings.

-

Complete daily tasks and achievements for extra rewards

-

Another helpful tip is to complete daily tasks and achievements for extra rewards. These are specific goals that you can accomplish by playing the game, such as serving a certain number of customers, making a certain amount of money, or using a certain booster. You can earn coins, stars, and gems for completing these tasks and achievements.

-

Have fun and experiment with different burger combinations

-

The last tip is to have fun and experiment with different burger combinations. You can try different ingredients, toppings, sauces, breads, and sides to create your own unique burger recipes. You can also see how your customers react to your creations and get feedback from them.

-

Conclusion

-

Burger Place Mod APK 0.15.0 is a fun and addictive cooking game that lets you run your own burger shop. You can make burgers for your hungry customers, buy ingredients and upgrades, customize your shop and character, and enjoy unlimited money and no ads. You can also challenge yourself with various levels and tasks, use boosters and power-ups, and have fun experimenting with different burger combinations.

-

If you are looking for a game that will keep you entertained and engaged for hours, then you should download Burger Place Mod APK 0.15.0 today. You will not regret it!

-

FAQs

-

Is Burger Place Mod APK 0.15.0 safe to download?

-

Yes, Burger Place Mod APK 0.15.0 is safe to download as long as you download it from a trusted source, such as this link. You should also scan the file with an antivirus program before installing it.
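
Beyond antivirus scanning, one lightweight integrity check is to compare the file's SHA-256 hash against a published checksum, assuming the download site provides one (many do not). A minimal Kotlin sketch; the file path in the usage comment is hypothetical:

```kotlin
import java.io.File
import java.security.MessageDigest

// Computes the SHA-256 hex digest of a downloaded file so it can be
// compared against a checksum published alongside the download.
fun sha256Of(file: File): String {
    val digest = MessageDigest.getInstance("SHA-256")
    file.inputStream().use { input ->
        val buffer = ByteArray(8192)
        var read = input.read(buffer)
        while (read != -1) {
            digest.update(buffer, 0, read)
            read = input.read(buffer)
        }
    }
    return digest.digest().joinToString("") { "%02x".format(it) }
}

// Hypothetical usage:
// val ok = sha256Of(File("/sdcard/Download/app.apk")) == publishedChecksum
```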

-

What are the minimum requirements for playing Burger Place Mod APK 0.15.0?

-

The minimum requirements for playing Burger Place Mod APK 0.15.0 are:

  • Android 4.4 or higher
  • At least 100 MB of free storage space
  • A stable internet connection

How can I get more money in Burger Place Mod APK 0.15.0?

-

You can get more money in Burger Place Mod APK 0.15.0 by:

  • Serving more customers and earning tips and ratings
  • Completing levels and challenges and earning stars and coins
  • Completing daily tasks and achievements and earning coins, stars, and gems
  • Using boosters and power-ups that double your earnings
  • Downloading the modded version that gives you unlimited money

How can I unlock more levels and items in Burger Place Mod APK 0.15.0?

-

You can unlock more levels and items in Burger Place Mod APK 0.15.0 by:

  • Earning enough stars to unlock new levels
  • Earning enough coins to buy new ingredients, equipment, and decorations
  • Earning enough gems to buy premium items and boosters
  • Downloading the modded version that unlocks everything

How can I contact the developer of Burger Place Mod APK 0.15.0?

-

You can contact the developer of Burger Place Mod APK 0.15.0 by:

-

\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Clash Mini APK Hack Download and Install Unlimited Money and Gems Mod.md b/spaces/1phancelerku/anime-remove-background/Clash Mini APK Hack Download and Install Unlimited Money and Gems Mod.md deleted file mode 100644 index db34d8efc79823c038d582c19f957b605d094f18..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Clash Mini APK Hack Download and Install Unlimited Money and Gems Mod.md +++ /dev/null @@ -1,96 +0,0 @@ -
-

Clash Mini APK Hack Mediafıre: How to Get Unlimited Gems for Free

-

If you are a fan of strategy games, you might have heard of Clash Mini, the latest spin-off from the popular Clash of Clans franchise. In this game, you can create your own team of miniatures and battle against other players in fast-paced matches. However, to unlock more characters, skins, and items, you will need gems, the premium currency of the game. Gems are hard to come by, and they can be quite expensive to buy with real money. That's why many players are looking for ways to get free gems, such as using clash mini apk hack mediafıre.

-

clash mini apk hack mediafıre


DOWNLOAD 🆓 https://jinyurl.com/2uNS9j



-

Introduction

-

In this article, we will tell you everything you need to know about clash mini apk hack mediafıre, including what it is, how to download and install it, how to use it, and what are its pros and cons. By the end of this article, you will be able to decide whether this hack is worth trying or not.

-

What is Clash Mini?

-

Clash Mini is a strategy game developed by Supercell, the same company behind Clash of Clans, Clash Royale, Brawl Stars, and Hay Day. It is set in the same universe as Clash of Clans, but with a different gameplay style. Instead of building your own base and raiding others, you can collect and upgrade various miniatures that represent different characters from the Clash world. You can then use them to form your own team and compete against other players in turn-based battles.

-

Why do you need gems in Clash Mini?

-

Gems are the premium currency of Clash Mini. You can use them to buy chests that contain random miniatures, skins, and items. You can also use them to speed up the upgrading process of your miniatures, or to unlock special offers and events. Gems can help you progress faster in the game and gain an edge over your opponents.

-

What is clash mini apk hack mediafıre?

-

Clash mini apk hack mediafıre is a modified version of the original Clash Mini app that allows you to generate unlimited gems for free. It is not an official app from Supercell, but rather a third-party app that has been hacked by some developers. It is usually hosted on mediafıre, a file-sharing platform that lets you download files without any registration or payment.

-

-

How to download and install clash mini apk hack mediafıre

-

If you want to try clash mini apk hack mediafıre, you will need to follow these steps:

-

Step 1: Find a reliable source for the apk file

-

The first thing you need to do is to find a trustworthy website that offers the clash mini apk hack mediafıre file. You can search for it on Google or any other search engine, but be careful not to click on any suspicious or malicious links that might harm your device or steal your personal information. You can also check the reviews and ratings of the website to see if other users have had a positive or negative experience with it.

-

Step 2: Enable unknown sources on your device

-

Before you can install the clash mini apk hack mediafıre file, you will need to enable unknown sources on your device. This is a security feature that prevents you from installing apps that are not from the official app store. To enable unknown sources, you will need to go to your device settings, then security, then toggle on the option that says "allow installation of apps from unknown sources". You might also need to confirm this action by tapping on "OK" or "Yes".

-

Step 3: Download and install the apk file

-

Once you have enabled unknown sources, you can proceed to download and install the clash mini apk hack mediafıre file. To do this, you will need to go to the website where you found the file, then click on the download button or link. You might have to wait for a few seconds or minutes for the download to complete, depending on your internet speed and the size of the file. After the download is done, you can open the file and tap on "install". You might also have to agree to some permissions and terms of service before the installation is complete.
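
For readers curious about what "tap on the file and install" actually does: the file manager hands the APK to the system package installer through an intent. A rough Kotlin sketch of that hand-off, assuming an app that declares its own FileProvider in the manifest (the authority string here follows a common convention and is hypothetical for this example):

```kotlin
import android.content.Context
import android.content.Intent
import androidx.core.content.FileProvider
import java.io.File

// Hands a downloaded APK to the system package installer. Assumes the app
// declares a FileProvider with authority "<applicationId>.fileprovider"
// in its manifest (a common convention, hypothetical here).
fun launchInstaller(context: Context, apk: File) {
    val uri = FileProvider.getUriForFile(
        context, "${context.packageName}.fileprovider", apk
    )
    val intent = Intent(Intent.ACTION_VIEW).apply {
        setDataAndType(uri, "application/vnd.android.package-archive")
        addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION)
        addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
    }
    context.startActivity(intent)
}
```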

-

How to use clash mini apk hack mediafıre

-

After you have successfully installed the clash mini apk hack mediafıre app, you can start using it to get free gems. Here are the steps you need to follow:

-

Step 1: Launch the app and log in with your account

-

The first thing you need to do is to launch the clash mini apk hack mediafıre app and log in with your existing Clash Mini account. If you don't have one, you can create one for free by entering your email and password. You can also use your Facebook or Google account to log in.

-

Step 2: Choose the amount of gems you want to generate

-

Once you have logged in, you will see a simple interface that shows your current balance of gems and a slider that lets you choose how many gems you want to generate. You can choose from 1000 to 99999 gems per day. The more gems you choose, the longer it will take for the hack to work.

-

Step 3: Wait for the verification process and enjoy your free gems

-

After you have chosen the amount of gems you want, you will need to wait for the verification process to complete. This is a security measure that prevents bots and spammers from abusing the hack. You might have to complete a human verification test, such as a captcha, a survey, or an offer. This should not take more than a few minutes. Once you have passed the verification, you will receive your free gems in your Clash Mini account. You can then use them to buy chests, upgrade your miniatures, and enjoy the game.

-

Pros and cons of clash mini apk hack mediafıre

-

Like any other hack, clash mini apk hack mediafıre has its advantages and disadvantages. Here are some of them:

-

Pros

-
  • Unlimited gems for free

    The main benefit of using clash mini apk hack mediafıre is that you can get unlimited gems for free without spending any real money. Gems are very useful in Clash Mini, as they can help you unlock more characters, skins, and items. They can also help you progress faster in the game and gain an edge over your opponents.

  • No root or jailbreak required

    Another advantage of using clash mini apk hack mediafıre is that you don't need to root or jailbreak your device to use it. Rooting or jailbreaking is a process that gives you full access to your device's system, but it also voids your warranty and exposes your device to security risks. With clash mini apk hack mediafıre, you don't have to worry about any of that.

  • Easy to use and safe to download

    The last advantage of using clash mini apk hack mediafıre is that it is very easy to use and safe to download. You don't need any technical skills or knowledge to use it. All you need is a device with an internet connection and a Clash Mini account. The app is also virus-free and malware-free, as long as you download it from a reliable source.

Cons

-

  • Not compatible with the official version of Clash Mini

    The first drawback of using clash mini apk hack mediafıre is that it is not compatible with the official version of Clash Mini. This means that you cannot play with other players who are using the original app, or access the official features and updates of the game. You can only play with other players who are using the same hack as you, which might limit your options and fun.

  • May contain malware or viruses

    The second drawback of using clash mini apk hack mediafıre is that it may contain malware or viruses that can harm your device or steal your personal information. Even if the app itself is clean when downloaded from a reliable source, there is no guarantee that every source is trustworthy or secure. There might be hidden code or scripts that can infect your device or compromise your data. Therefore, you should always scan the file before installing it, and use reputable antivirus software to protect your device.

  • May get banned by the game developers

    The last drawback of using clash mini apk hack mediafıre is that you may get banned by the game developers for violating their terms of service. Using hacks or cheats is considered unfair and unethical by most game developers, and they have the right to ban or suspend any account that is found to be using them. If you get banned, you will lose all your progress and achievements in the game, and you might not be able to play it again. Therefore, you should use this hack at your own risk, and be careful not to get caught by the game's anti-cheat system.

    Conclusion

    -

    Clash mini apk hack mediafıre is a hack that allows you to get unlimited gems for free in Clash Mini, a strategy game developed by Supercell. It is a modified version of the original app that is hosted on mediafıre, a file-sharing platform. To use this hack, you need to download and install the apk file on your device, then launch the app and log in with your account. You can then choose the amount of gems you want to generate, and wait for the verification process to complete. You can use the gems to buy chests, upgrade your miniatures, and enjoy the game.

    -

    However, this hack also has some drawbacks, such as being not compatible with the official version of Clash Mini, containing malware or viruses, and getting banned by the game developers. Therefore, you should weigh the pros and cons carefully before deciding whether to use this hack or not. You should also be aware of the risks and consequences of using hacks or cheats in any game.

    -

    FAQs

    -
  • Is clash mini apk hack mediafıre legal?

    No, clash mini apk hack mediafıre is not legal, as it violates the terms of service of Clash Mini and Supercell. It also infringes on their intellectual property rights and copyrights. Using this hack can result in legal action from the game developers or authorities.

  • Is clash mini apk hack mediafıre safe?

    Not necessarily: it may contain malware or viruses that can harm your device or steal your personal information. It may also expose your account to security risks or hacking attempts. Therefore, you should always scan the file before installing it, and use reputable antivirus software to protect your device.

  • Does clash mini apk hack mediafıre work?

    Yes, clash mini apk hack mediafıre works, as it can generate unlimited gems for free in Clash Mini. However, it may not work with the latest version of Clash Mini or with other devices or platforms. It may also stop working if the game developers patch or update their anti-cheat system.

  • Where can I download clash mini apk hack mediafıre?

    You can download clash mini apk hack mediafıre from various websites that offer it for free. However, you should be careful not to download it from any suspicious or malicious links that might harm your device or steal your personal information. You should also check the reviews and ratings of the website to see if other users have had a positive or negative experience with it.

  • Can I use clash mini apk hack mediafıre with my existing Clash Mini account?

    Yes, as long as you log in with it when you launch the app. However, you should be aware that using this hack can put your account at risk of being banned or suspended by the game developers for violating their terms of service. If you get banned, you will lose all your progress and achievements in the game, and you might not be able to play it again. Therefore, you should use this hack at your own risk, and be careful not to get caught by the game's anti-cheat system.

    I hope this article has helped you understand more about clash mini apk hack mediafıre and how to use it. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and have a great day!

    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Demolition Derby 3 Hack Mod APK with Unlimited Money Feature.md b/spaces/1phancelerku/anime-remove-background/Download Demolition Derby 3 Hack Mod APK with Unlimited Money Feature.md deleted file mode 100644 index d3e140efc37bd1433c70ab8b53dbf48bf9859895..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Demolition Derby 3 Hack Mod APK with Unlimited Money Feature.md +++ /dev/null @@ -1,80 +0,0 @@ - -

    Demolition Derby 3 Mod APK Hack Unlimited Money: A Review

    -

    If you are a fan of racing games, especially the ones that involve smashing and crashing other cars, then you might have heard of Demolition Derby 3. This is a popular game that lets you experience the thrill and excitement of demolition derby racing. But what if you want to enjoy the game without any limitations or restrictions? Well, that's where Demolition Derby 3 Mod APK comes in. In this article, we will review the game and its modded version, and show you how to download and install it on your device.

    -

    demolition derby 3 mod apk hack unlimited money


    Downloadhttps://jinyurl.com/2uNRtw



    -

    What is Demolition Derby 3?

    -

    Demolition Derby 3 is a racing game developed by Beer Money Games, the same studio that created Demolition Derby 2 and Demolition Derby Multiplayer. The game is a sequel to the previous titles, and it offers more features, cars, maps, and modes than ever before. The game is available for free on Google Play Store and App Store, but it also contains in-app purchases and ads.

    -

    Features of Demolition Derby 3

    -

    Demolition Derby 3 has many features that make it one of the best racing games on the market. Here are some of them:

    -

    - Realistic physics and graphics

    -

The game uses a realistic physics engine that simulates the damage and destruction of the cars. You can see the parts flying off, the windows shattering, the tires bursting, and the smoke rising as you collide with other vehicles. The game also has stunning graphics that create an immersive environment for the races. You can see the details of the cars, the tracks, the weather effects, and the lighting.

    -

    - Multiple game modes and maps

    -

    The game offers various game modes that suit different preferences and play styles. You can choose from Career Mode, Quick Play Mode, Multiplayer Mode, Free Drive Mode, and Custom Events Mode. Each mode has its own objectives, challenges, rewards, and leaderboards. The game also has over 40 maps that range from arenas, stadiums, dirt tracks, highways, deserts, forests, and more. Each map has its own obstacles, hazards, ramps, loops, and shortcuts.

    -

    -

    - Customizable cars and upgrades

    -

    The game has over 80 cars that you can unlock, buy, and customize. You can choose from different categories such as muscle cars, sports cars, trucks, buses, vans, and even tanks. You can also modify your car's appearance by changing its color, paint job, decals, wheels, spoilers, bumpers, hoods, roofs, and more. You can also upgrade your car's performance by improving its engine, transmission, suspension, brakes, tires, armor, nitro, and more.

    -

    What is Demolition Derby 3 Mod APK?

    -

    Demolition Derby 3 Mod APK is a modified version of the original game that gives you access to unlimited money and coins. This means that you can buy any car you want, unlock any map you want, and upgrade your car to the fullest without spending any real money. The mod apk also removes all the ads from the game and does not require root access to work.

Benefits of Demolition Derby 3 Mod APK

-

    Demolition Derby 3 Mod APK has many benefits that make it a better choice than the original game. Here are some of them:

    -

    - Unlimited money and coins

    -

    With Demolition Derby 3 Mod APK, you don't have to worry about running out of money or coins. You can use them to buy any car you want, unlock any map you want, and upgrade your car to the fullest. You can also use them to buy boosters, power-ups, and skins that can enhance your gameplay. You can enjoy the game without any limitations or restrictions.

    -

    - All cars and maps unlocked

    -

    With Demolition Derby 3 Mod APK, you don't have to wait for hours or days to unlock new cars and maps. You can access all of them from the start. You can choose from over 80 cars and over 40 maps that suit your taste and preference. You can also switch between them anytime you want. You can explore the game's content without any hassle or boredom.

    -

    - No ads and no root required

    -

    With Demolition Derby 3 Mod APK, you don't have to deal with annoying ads that pop up every few minutes. You can play the game without any interruptions or distractions. You can also play the game without rooting your device. You don't have to risk damaging your device or voiding your warranty. You can play the game safely and smoothly.

    -

    How to download and install Demolition Derby 3 Mod APK?

    -

    If you are interested in downloading and installing Demolition Derby 3 Mod APK, you can follow these simple steps:

    -

    Steps to download and install Demolition Derby 3 Mod APK

    -

    - Step 1: Download the mod apk file from a trusted source

    -

    The first step is to download the mod apk file from a reliable source. You can use the link below to download the latest version of Demolition Derby 3 Mod APK. The file size is about 100 MB, so make sure you have enough storage space on your device.

    -

    Download Demolition Derby 3 Mod APK
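
Since the file is roughly 100 MB, checking free space before downloading avoids a failed transfer. A minimal Kotlin sketch; the size constant simply mirrors the figure quoted above and may differ for the real file:

```kotlin
import android.content.Context

// Returns true if app-accessible storage has room for the download.
// 100 MB mirrors the approximate APK size mentioned above.
fun hasRoomForApk(
    context: Context,
    requiredBytes: Long = 100L * 1024 * 1024
): Boolean {
    val dir = context.getExternalFilesDir(null) ?: context.filesDir
    return dir.usableSpace > requiredBytes
}
```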

    -

    - Step 2: Enable unknown sources on your device settings

    -

    The second step is to enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on.

    -

    - Step 3: Install the mod apk file and launch the game

    -

    The third step is to install the mod apk file and launch the game. To do this, locate the downloaded file on your device, tap on it, and follow the instructions on the screen. Once the installation is complete, open the game and enjoy.

    -

    Conclusion

    -

    Demolition Derby 3 is a fun and exciting racing game that lets you smash and crash other cars in various modes and maps. However, if you want to enjoy the game without any limitations or restrictions, you should try Demolition Derby 3 Mod APK. This is a modified version of the game that gives you unlimited money and coins, all cars and maps unlocked, no ads, and no root required. You can download and install it easily by following the steps above. So what are you waiting for? Download Demolition Derby 3 Mod APK now and unleash your inner racer.

    -

    FAQs

    -

    Here are some frequently asked questions about Demolition Derby 3 Mod APK:

    -
  • Is Demolition Derby 3 Mod APK safe?

    Yes, Demolition Derby 3 Mod APK is safe to use. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from a trusted source and scan it with an antivirus before installing it.

  • Is Demolition Derby 3 Mod APK compatible with my device?

    Demolition Derby 3 Mod APK is compatible with most Android devices that run on Android 4.4 or higher. However, some devices may experience performance issues or crashes due to hardware limitations or compatibility issues.

  • Can I play Demolition Derby 3 Mod APK online?

    Yes, you can play Demolition Derby 3 Mod APK online with other players around the world. However, you may encounter some problems such as lagging, disconnecting, or banning due to server issues or anti-cheat measures.

      Here are some more frequently asked questions about Demolition Derby 3 Mod APK:

      -
  • Can I update Demolition Derby 3 Mod APK?

    Yes, you can update Demolition Derby 3 Mod APK whenever a new version is available. However, you may lose your modded features and progress if you update it from the Google Play Store. To avoid this, you should always update it from the same source where you downloaded it.

  • Can I use Demolition Derby 3 Mod APK with my existing account?

    Yes, you can use Demolition Derby 3 Mod APK with your existing account. However, you may risk losing your account or getting banned if the game detects that you are using a modded version. To avoid this, you should always back up your account data and use a different account for the modded version.

  • Can I request a feature or report a bug for Demolition Derby 3 Mod APK?

    Yes, you can request a feature or report a bug for Demolition Derby 3 Mod APK by contacting the developer or the source where you downloaded it. However, there is no guarantee that your request or report will be addressed or resolved.

        -

      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Green Farm 3 and Experience Farming Adventure on Your PC.md b/spaces/1phancelerku/anime-remove-background/Download Green Farm 3 and Experience Farming Adventure on Your PC.md deleted file mode 100644 index 783abb48fec543dc5262833044b4186532565b39..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Green Farm 3 and Experience Farming Adventure on Your PC.md +++ /dev/null @@ -1,142 +0,0 @@ -
      -

      Download Green Farm 3: A Fun and Relaxing Farming Adventure

      -

      Do you love farming games? Do you want to experience the joy of restoring an old manor and turning it into a thriving farm? If yes, then you should download Green Farm 3, a simulation game developed by Gameloft. In this game, you will inherit a rundown estate from your uncle and embark on a series of missions to bring it back to life. You will also have to manage your farm with the help of friends and neighbors, and enjoy a fresh new atmosphere, easier controls, and a rich storyline with new characters to meet and play with. In this article, we will tell you what Green Farm 3 is, how to download it, and some tips and tricks for playing it.

      -

      What is Green Farm 3?

      -

      Green Farm 3 is a casual simulation game that lets you rediscover farming adventure in a new and exciting way. It is the third installment of the popular Green Farm series, which has been downloaded by millions of players around the world. The game offers many new features and improvements over the previous versions, such as:

      -

      download green farm 3


      Download https://jinyurl.com/2uNSmX



      -

      The story and the gameplay

      -

      In Green Farm 3, you will play as a young farmer who inherits an old manor from your uncle. Your uncle was a famous adventurer who traveled around the world and collected many exotic items and animals. However, he also neglected his farm, which became dilapidated and abandoned. Your task is to restore the manor to its former glory, by completing various missions that involve farming, harvesting, crafting, and more. You will also have to deal with some challenges and surprises along the way, such as a mysterious rival who wants to sabotage your efforts, or a friendly ghost who haunts the manor.

      -

      The features and the benefits

      -

      Green Farm 3 has many features that make it fun and enjoyable to play, such as:

      -
  • A fresh new atmosphere, with more colorful environments and cooler characters to meet.
  • Easier controls that make managing your farm simpler and more fun.
  • A rich storyline, with new characters to meet and play with, each with their own personality and backstory.
  • Many missions to complete, each with different objectives and rewards.
  • A variety of crops, animals, buildings, decorations, and products to grow and produce on your farm.
  • A social aspect that allows you to connect with friends and neighbors, help each other out, trade goods, or compete in events.
  • A customization option that lets you personalize your farm with your own style and preferences.

      By playing Green Farm 3, you will not only have fun, but also benefit from some positive effects, such as:

      -
  • Relaxing your mind and body, by immersing yourself in a peaceful and soothing environment.
  • Stimulating your creativity and imagination, by designing your own farm layout and products.
  • Improving your time management and planning skills, by organizing your tasks and resources efficiently.
  • Learning about farming and nature, by discovering different types of plants and animals.
  • Making new friends and socializing with others, by interacting with other players online.

      How to download Green Farm 3?

      -

      If you are interested in playing Green Farm 3, you can download it for free from various platforms. Here are the steps for downloading it for different devices:

      -

      For Android devices

      -
  1. Go to the Google Play Store app on your device.
  2. Search for "Green Farm 3" in the search bar.
  3. Select the game from the list of results and tap on "Install".
  4. Wait for the game to download and install on your device.
  5. Open the game and enjoy playing it.

      For iOS devices

      -
  1. Go to the App Store app on your device.
  2. Search for "Green Farm 3" in the search bar.
  3. Select the game from the list of results and tap on "Get".
  4. Enter your Apple ID and password if prompted.
  5. Wait for the game to download and install on your device.
  6. Open the game and enjoy playing it.

      For PC and Mac

      -
  1. Go to the official website of Gameloft at https://www.gameloft.com/en/.
  2. Click on "Games" and select "Green Farm 3" from the list of games.
  3. Click on "Download" and choose your preferred platform (Windows or Mac).
  4. Follow the instructions on the screen to download and install the game on your computer.
  5. Open the game and enjoy playing it.

      Tips and tricks for playing Green Farm 3

      -

      Now that you have downloaded Green Farm 3, you might be wondering how to play it well and make the most out of it. Here are some tips and tricks that will help you succeed in your farming adventure:

      -


      Manage your resources wisely

      -

      In Green Farm 3, you will need various resources to run your farm, such as coins, cash, energy, seeds, water, feed, fertilizer, etc. You can earn these resources by completing missions, harvesting crops, selling products, or watching ads. However, you should also be careful not to waste them or run out of them. Here are some ways to manage your resources wisely:

      -
  • Plan ahead and prioritize your tasks. For example, plant crops that match your mission objectives, or craft products that have high demand in the market.
  • Use your energy efficiently. Energy is used for almost every action in the game, such as planting, harvesting, feeding, etc. You can replenish your energy by waiting over time, eating food, or using cash. However, you should also avoid wasting energy by doing unnecessary actions or leaving crops unharvested.
  • Save your cash for important purchases. Cash is the premium currency in the game, which can be used to buy special items, speed up processes, or unlock new features. You can earn cash by leveling up, completing achievements, or spending real money. However, you should also save your cash for important purchases, such as expanding your land, hiring workers, or buying rare animals.

      Complete missions and achievements

      -

      In Green Farm 3, you will have many missions and achievements to complete, which will guide you through the game and reward you with various prizes. Missions are tasks that are given by different characters in the game, such as your uncle, your friends, or your rivals. Achievements are goals that are set by yourself, such as reaching a certain level, collecting a certain amount of products, or visiting a certain number of neighbors. Here are some benefits of completing missions and achievements:

      -
  • You will progress faster in the game and unlock new features and content.
  • You will earn more coins, cash, energy, and other resources.
  • You will learn more about the story and the characters of the game.
  • You will have more fun and challenge yourself.

      Connect with friends and neighbors

      -

      In Green Farm 3, you can also connect with other players online and become friends or neighbors. You can visit their farms, help them out with their tasks, trade goods with them, or chat with them. You can also join events and competitions with them and win exclusive rewards. Here are some advantages of connecting with friends and neighbors:

      -
  • You will make new friends and socialize with others who share your interest in farming games.
  • You will get more help and support for your farm activities.
  • You will get more opportunities to earn coins, cash, energy, and other resources.
  • You will get more inspiration and ideas for your own farm design.

      Conclusion

      -

      To sum up, Green Farm 3 is a fun and relaxing farming adventure that you can download for free on various platforms. It has many features and benefits that make it enjoyable to play, such as a fresh new atmosphere, easier controls, a rich storyline, many missions to complete, many crops, animals, buildings, decorations, and products to grow and produce, a social aspect, a customization option, and more. You can also download it easily for your Android, iOS, or PC and Mac devices. Moreover, you can improve your farming skills and have more fun by following some tips and tricks, such as managing your resources wisely, completing missions and achievements, and connecting with friends and neighbors. If you are looking for a farming game that is fun, relaxing, and rewarding, you should definitely download Green Farm 3 and start your adventure today.

      -

      FAQs

      -

      Here are some frequently asked questions about Green Farm 3:

      -
  1. How can I get more cash in Green Farm 3?

     You can get more cash in Green Farm 3 by leveling up, completing achievements, watching ads, or spending real money. You can also get some cash by visiting your friends' farms and helping them out.

  2. How can I expand my land in Green Farm 3?

     You can expand your land in Green Farm 3 by using cash or coins. You will need to clear some obstacles first before you can buy more land. You can also unlock new areas by completing certain missions.

  3. How can I hire workers in Green Farm 3?

     You can hire workers in Green Farm 3 by using cash or coins. Workers can help you with various tasks on your farm, such as planting, harvesting, feeding, etc. You can also upgrade your workers to make them more efficient.

  4. How can I join events and competitions in Green Farm 3?

     You can join events and competitions in Green Farm 3 by tapping on the event icon on the top right corner of the screen. Events and competitions are time-limited challenges that require you to complete certain objectives or compete with other players. You can win exclusive rewards by participating in them.

  5. How can I contact the support team of Green Farm 3?

     You can contact the support team of Green Farm 3 by tapping on the settings icon on the top left corner of the screen. Then, tap on "Help & Support" and choose the option that suits your issue. You can also visit the official website of Gameloft at https://www.gameloft.com/en/ for more information.

      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Nulls Brawl Mod APK 49.194 with Unlimited Money and Infinite Gems.md b/spaces/1phancelerku/anime-remove-background/Download Nulls Brawl Mod APK 49.194 with Unlimited Money and Infinite Gems.md deleted file mode 100644 index 500ebc65015fc3b05f37f9db5b7f41e96dc1ae39..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Nulls Brawl Mod APK 49.194 with Unlimited Money and Infinite Gems.md +++ /dev/null @@ -1,82 +0,0 @@ -
      -

      What is Nulls Brawl Mod APK?

      -

      If you are a fan of fast-paced multiplayer games, you might have heard of Brawl Stars, a popular game developed by Supercell, the makers of Clash of Clans and Clash Royale. Brawl Stars is a 3v3 shooter game where you can choose from dozens of unique characters, called brawlers, each with their own abilities, gadgets, and star powers. You can play with your friends or solo across various game modes, such as Gem Grab, Showdown, Brawl Ball, Bounty, Heist, and more. You can also collect skins, pins, trophies, and other rewards as you progress in the game.

      -

      download nulls brawl mod apk


      Download Ziphttps://jinyurl.com/2uNSUX



      -

      However, if you want to experience more features and fun in Brawl Stars, you might want to try Nulls Brawl Mod APK, a modified version of the game that offers many advantages over the original one. Nulls Brawl Mod APK is developed by Nulls Gg, a team of modders who create custom versions of popular games. With Nulls Brawl Mod APK, you can enjoy:

      -
  • Infinite gems: Gems are the premium currency in Brawl Stars that you can use to buy skins, pins, brawl boxes, brawl passes, and other items. Normally, you have to spend real money or complete quests to get gems, but with Nulls Brawl Mod APK, you can get unlimited gems for free.
  • A lot of money: Money, or coins, are the basic currency in Brawl Stars that you can use to upgrade your brawlers and unlock their star powers. Normally, you have to win matches or open brawl boxes to get coins, but with Nulls Brawl Mod APK, you can get a lot of money for free.
  • All characters unlocked: Characters, or brawlers, are the main attraction of Brawl Stars. There are over 40 brawlers in the game, each with their own personality, style, and skills. Normally, you have to open brawl boxes or reach certain milestones to unlock new brawlers, but with Nulls Brawl Mod APK, you can access all characters from the start.

As you can see, Nulls Brawl Mod APK is a great way to enhance your gaming experience and have more fun in Brawl Stars.

      How to download Nulls Brawl Mod APK?

      -

      Now that you know what Nulls Brawl Mod APK is and why you should download it, you might be wondering how to do it. Well, don't worry, because it's very easy and simple. Just follow these steps and you'll be playing Nulls Brawl Mod APK in no time.

      -

      Step 1: Enable unknown sources

      -

      The first thing you need to do is to enable unknown sources on your Android device. This will allow you to install apps that are not from the Google Play Store, such as Nulls Brawl Mod APK. To do this, go to your device's settings, then security, then unknown sources, and turn it on. You might see a warning message, but just ignore it and tap OK.

      -

      -

      Step 2: Download the APK file

      -

      The next thing you need to do is to download the APK file of Nulls Brawl Mod APK. This is the file that contains the modded version of the game. You can find it on various websites, but make sure you choose a reputable and safe one. For example, you can use this link to download the latest version of Nulls Brawl Mod APK. Just click on the download button and wait for the file to be downloaded.

      -

      Step 3: Install the APK file

      -

      The last thing you need to do is to install the APK file on your device. To do this, locate the file in your device's storage, either using a file manager app or by going to your downloads folder. Then, tap on the file and follow the instructions on the screen. You might see a pop-up asking for permissions, but just allow them and continue. Once the installation is done, you can open the game and enjoy Nulls Brawl Mod APK.

      -

      How to play Nulls Brawl Mod APK?

      -

      Now that you have downloaded and installed Nulls Brawl Mod APK, you might be wondering how to play it. Well, it's very similar to the original Brawl Stars game, but with more features and fun. Here are some of the things you can do in Nulls Brawl Mod APK.

      -

      Choose your brawler

      -

      The first thing you need to do is to choose your brawler. As mentioned before, Nulls Brawl Mod APK gives you access to all brawlers from the start, so you can pick any one you like. You can also customize your brawler's appearance by changing their skin or pin. To do this, go to the brawlers menu and tap on the brawler you want to use. Then, tap on the skin or pin icon and select the one you want.

      -

      Join a game mode

      -

      The next thing you need to do is to join a game mode. Nulls Brawl Mod APK offers all the game modes that are available in the original Brawl Stars game, such as Gem Grab, Showdown, Brawl Ball, Bounty, Heist, and more. You can also play special events or friendly matches with your friends or club members. To join a game mode, go to the home screen and tap on the play button. Then, choose the game mode you want to play and tap on it.

      -

      Battle with other players

      -

      The last thing you need to do is to battle with other players. Nulls Brawl Mod APK lets you play with or against other players who are also using the modded version of the game. You can use your brawler's abilities, gadgets, and star powers to win matches and earn rewards. To battle with other players, just follow the instructions on the screen and have fun.

      Tips and tricks for Nulls Brawl Mod APK

      -

      If you want to get the most out of Nulls Brawl Mod APK, you might want to follow some tips and tricks that can help you improve your gameplay and have more fun. Here are some of them:

      -

      Experiment with different brawlers

      -

      One of the best things about Nulls Brawl Mod APK is that you can use any brawler you want, without having to unlock them or spend gems or coins. This means you can experiment with different brawlers and find the ones that suit your playstyle and preference. You can also learn their strengths and weaknesses, and how to counter them in battles. To experiment with different brawlers, just go to the brawlers menu and tap on the random button. You will be assigned a random brawler that you can use in the next match.

      -

      Team up with your friends

      -

      Another great thing about Nulls Brawl Mod APK is that you can team up with your friends or other players who are also using the modded version of the game. This can make the game more fun and exciting, as you can communicate and coordinate with your teammates, and use strategies and tactics to win matches. To team up with your friends, just go to the social menu and tap on the invite button. You can then send an invitation to your friends or club members, or join an existing team.

      -

      Upgrade your brawlers

      -

      Even though Nulls Brawl Mod APK gives you unlimited gems and coins, you still need to use them wisely to upgrade your brawlers and unlock their star powers and gadgets. Upgrading your brawlers can make them more powerful and effective in battles, as they can deal more damage, have more health, and use more skills. To upgrade your brawlers, just go to the brawlers menu and tap on the brawler you want to upgrade. Then, tap on the upgrade button and spend the required amount of coins.

      -

      Conclusion

      -

      Nulls Brawl Mod APK is a modified version of Brawl Stars that offers many benefits over the original game, such as unlimited gems, money, and characters. It also lets you play with or against other players who are using the same modded version of the game. You can download and install Nulls Brawl Mod APK easily by following the steps mentioned above. You can also play Nulls Brawl Mod APK by choosing your brawler, joining a game mode, and battling with other players. You can also follow some tips and tricks to improve your gameplay and have more fun.

      -

      If you are looking for a way to spice up your gaming experience and have more fun in Brawl Stars, you should definitely try Nulls Brawl Mod APK. It is a free, safe, and easy way to enjoy more features and fun in one of the most popular multiplayer games in the world. So what are you waiting for? Download Nulls Brawl Mod APK today and start brawling!

      -

      FAQs

      -

      Here are some of the frequently asked questions and answers about Nulls Brawl Mod APK:

      -
  • Q: Is Nulls Brawl Mod APK safe?
  • A: Yes, Nulls Brawl Mod APK is safe to download and use. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from a reputable source and scan it before installing it.
  • Q: Is Nulls Brawl Mod APK legal?
  • A: No, Nulls Brawl Mod APK is not legal. It is a modified version of Brawl Stars that violates the terms of service of Supercell, the developer of the game. Using Nulls Brawl Mod APK can result in a ban from the original game or legal action from Supercell.
  • Q: Can I play Nulls Brawl Mod APK with players who are using the original game?
  • A: No, you cannot play Nulls Brawl Mod APK with players who are using the original game. Nulls Brawl Mod APK uses a different server than the original game, so you can only play with or against other players who are using the same modded version of the game.
  • Q: Can I update Nulls Brawl Mod APK?
  • A: Yes, you can update Nulls Brawl Mod APK whenever there is a new version available. However, you cannot update it from the Google Play Store or the original game. You have to download the new version from a reputable source and install it manually.
  • Q: Can I transfer my progress from Nulls Brawl Mod APK to the original game?
  • A: No, you cannot transfer your progress from Nulls Brawl Mod APK to the original game. Nulls Brawl Mod APK uses a different account and data system than the original game, so you cannot sync or transfer your progress between them.

      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Stick War Legacy MOD APK for Android - Unlimited Gems and More.md b/spaces/1phancelerku/anime-remove-background/Download Stick War Legacy MOD APK for Android - Unlimited Gems and More.md deleted file mode 100644 index e5272ab292f6be3391ecc43505af5063d7790bac..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Stick War Legacy MOD APK for Android - Unlimited Gems and More.md +++ /dev/null @@ -1,116 +0,0 @@ - -

      Download Stick War Legacy Mod APK Android: A Guide for Beginners


      Are you a fan of strategy games? Do you want to experience a thrilling and addictive game that will challenge your skills and creativity? If yes, then you should try Stick War Legacy, one of the most popular and fun games on Android devices. And if you want to enjoy the game even more, you should download Stick War Legacy mod apk android, which will give you unlimited gems, gold, and other features that will make your gameplay easier and more enjoyable. In this article, we will tell you everything you need to know about Stick War Legacy and its mod apk version, including what it is, how to play it, why you should download it, and some tips and tricks to help you win. Let's get started!


      download stick war legacy mod apk android


      Download ❤❤❤ https://jinyurl.com/2uNNV6




      What is Stick War Legacy?


      Stick War Legacy is a strategy game developed by Max Games Studios, where you control an army of stick figures in a world called Inamorta. In this world, different nations have their own unique weapons and technologies, such as archers, spears, swords, mages, and even giants. Your goal is to conquer all the territories and become the ruler of Inamorta. You can do this by building your own fort, recruiting and training your units, and attacking your enemies with different strategies. You can also play in different modes, such as campaign mode, survival mode, tournament mode, and endless mode.


      Features of Stick War Legacy


      Stick War Legacy has many features that make it an exciting and addictive game. Some of them are:

• Stunning graphics and animations that bring the stick figures to life.
• Simple and intuitive controls that let you command your army with ease.
• A variety of units and skills that you can use to customize your army and strategy.
• Challenging and rewarding gameplay that tests your skills and creativity.
• Plenty of content and modes to explore and enjoy.

      How to play Stick War Legacy


      Playing Stick War Legacy is easy and fun. Here are the basic steps that you need to follow:

1. Choose a mode that you want to play. You can start with the campaign mode to learn the basics of the game.
2. Select a difficulty level that suits your preference. You can choose from normal, hard, or insane.
3. Start the game and build your fort. You can mine gold from the gold statue near your base, and build other structures such as barracks, an archery range, or a mage tower.
4. Recruit and train your units. You can choose from miners, swordwrath, archidons, speartons, magikill, or giants. Each unit has its own strengths and weaknesses.
5. Attack your enemy's base. You can either control each unit individually or use the attack button to send all your units at once. You can also use special skills, such as heal, rage, or summon minions.
6. Destroy your enemy's statue to win the game. You can also capture their gold statue to get more resources.

      Why download Stick War Legacy mod apk android?


      If you love playing Stick War Legacy but want to have more fun and convenience, then you should download Stick War Legacy mod apk android. This is a modified version of the game that gives you access to unlimited gems, gold, and other features that will enhance your gameplay. With Stick War Legacy mod apk android, you can:


      Benefits of Stick War Legacy mod apk android

• Get unlimited gems and gold that you can use to buy and upgrade anything you want in the game. You can also unlock all the skins and weapons for your units.
• Enjoy unlimited stamina and health for your units, making them invincible and unstoppable on the battlefield.
• Remove all the ads and pop-ups that may interrupt your gameplay, so you can play without distractions or annoyances.
• Access all the features and modes of the game without any restrictions. You can play any level, mode, or difficulty that you want.

      How to download and install Stick War Legacy mod apk android


      Downloading and installing Stick War Legacy mod apk android is easy and fast. Here are the steps that you need to follow:

1. Click on the link below to download the Stick War Legacy mod apk file. The file is about 100 MB, so make sure you have enough space on your device.
2. After downloading the file, go to your device settings and enable the installation of apps from unknown sources. This allows you to install the mod apk file.
3. Locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait a few seconds.
4. Once the installation is complete, open the game and enjoy Stick War Legacy mod apk android. You will see that you have unlimited gems, gold, and other features in the game. (If you prefer to install from a computer, see the sketch after these steps.)
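
For the computer route mentioned in step 4, the last two steps can be driven over USB with Android's platform tools. This is a minimal sketch, not part of the mod itself: it assumes `adb` is installed and on your PATH, USB debugging is enabled on the device, and the APK file name is a placeholder.

```python
# Hypothetical sideload helper: installs a downloaded APK over USB via adb.
# Assumes Android platform tools (adb) are installed and the device has
# USB debugging enabled; the file name below is a placeholder.
import subprocess
from pathlib import Path

def sideload_apk(apk_path: str) -> None:
    apk = Path(apk_path)
    if not apk.is_file():
        raise FileNotFoundError(f"APK not found: {apk}")
    # -r reinstalls over an existing copy while keeping app data,
    # mirroring the manual "install over the old version" flow.
    subprocess.run(["adb", "install", "-r", str(apk)], check=True)

if __name__ == "__main__":
    sideload_apk("stick-war-legacy-mod.apk")  # placeholder file name
```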

      Tips and tricks for Stick War Legacy mod apk android


      Now that you have downloaded and installed Stick War Legacy mod apk android, you may wonder how to play it better and win more games. Here are some tips and tricks that you can use:


      Choose your strategy wisely


      Stick War Legacy is a strategy game, so you need to plan your moves carefully and adapt to different situations. You can choose from different strategies, such as offensive, defensive, balanced, or creative. Each strategy has its own advantages and disadvantages, so you need to consider your enemy's strengths and weaknesses, as well as your own resources and goals. For example, if you want to attack quickly and aggressively, you can use an offensive strategy with swordwrath and archidons. But if you want to defend your base and wait for an opportunity, you can use a defensive strategy with speartons and magikill.


      Upgrade your units and skills


      One of the best ways to improve your gameplay is to upgrade your units and skills. You can do this by using gems and gold that you get from winning games or from Stick War Legacy mod apk android. You can upgrade your units' health, damage, speed, armor, etc. You can also upgrade your skills' cooldown, duration, effect, etc. Upgrading your units and skills will make them more powerful and efficient in combat.


      How to download stick war legacy mod apk for android devices
      -Stick war legacy mod apk unlimited gems and money download android
      -Download stick war legacy mod apk latest version 2023.2.85 for android
      -Stick war legacy mod apk android 1 download free
      -Download stick war legacy mod apk offline for android
      -Stick war legacy mod apk hack download android no root
      -Download stick war legacy mod apk revdl for android
      -Stick war legacy mod apk download android rexdl
      -Download stick war legacy mod apk with cheat menu for android
      -Stick war legacy mod apk download android happymod
      -Download stick war legacy mod apk unlocked everything for android
      -Stick war legacy mod apk download android apkpure
      -Download stick war legacy mod apk no ads for android
      -Stick war legacy mod apk download android 2023
      -Download stick war legacy mod apk new update for android
      -Stick war legacy mod apk download android unlimited health
      -Download stick war legacy mod apk full version for android
      -Stick war legacy mod apk download android mega
      -Download stick war legacy mod apk all skins unlocked for android
      -Stick war legacy mod apk download android mediafıre
      -Download stick war legacy mod apk free shopping for android
      -Stick war legacy mod apk download android 4.4.2
      -Download stick war legacy mod apk god mode for android
      -Stick war legacy mod apk download android 5.1.1
      -Download stick war legacy mod apk no verification for android
      -Stick war legacy mod apk download android 6.0.1
      -Download stick war legacy mod apk unlimited troops for android
      -Stick war legacy mod apk download android 7.0
      -Download stick war legacy mod apk with obb file for android
      -Stick war legacy mod apk download android 8.0
      -Download stick war legacy mod apk max level for android
      -Stick war legacy mod apk download android 9.0
      -Download stick war legacy mod apk infinite mana for android
      -Stick war legacy mod apk download android 10.0
      -Download stick war legacy mod apk no ban for android
      -Stick war legacy mod apk download android 11.0
      -Download stick war legacy mod apk all weapons unlocked for android
      -Stick war legacy mod apk download android zippyshare
      -Download stick war legacy mod apk high damage for android
      -Stick war legacy mod apk download android uptodown


      Use gems and gold wisely


      Gems and gold are the main currencies in Stick War Legacy. You can use them to buy and upgrade anything you want in the game. However, even though you have unlimited gems and gold from Stick War Legacy mod apk android, you should still use them wisely. You should not waste them on unnecessary things or buy everything at once. You should prioritize what you need most and save some for later. You should also balance your spending between units and skills, as well as between offense and defense.


      Conclusion


      Stick War Legacy is a fun and addictive strategy game that will keep you entertained for hours. You can control an army of stick figures in a world of war and conquer all the territories. You can also download Stick War Legacy mod apk android to get unlimited gems, gold, and other features that will make your gameplay easier and more enjoyable. In this article, we have told you everything you need to know about Stick War Legacy and its mod apk version, including what it is, how to play it, why you should download it, and some tips and tricks to help you win. We hope that this article has been helpful and informative for you. Now go ahead and download Stick War Legacy mod apk android and have fun!


      Summary of the article


      This article is a guide for beginners who want to download Stick War Legacy mod apk android. It covers the following topics:

• What is Stick War Legacy?
• Why download Stick War Legacy mod apk android?
• How to download and install Stick War Legacy mod apk android?
• Tips and tricks for Stick War Legacy mod apk android

      FAQs


      Here are some frequently asked questions about Stick War Legacy mod apk android:

1. Is Stick War Legacy mod apk android safe to use?

   Yes, Stick War Legacy mod apk android is safe to use, as long as you download it from a trusted source. However, you should always be careful when downloading and installing any mod apk file, as some of them may contain viruses or malware that can harm your device. You should also back up your data before installing any mod apk file, in case something goes wrong (a minimal backup sketch follows this FAQ).

2. Does Stick War Legacy mod apk android work on all devices?

   Stick War Legacy mod apk android works on most Android devices that support the original version of the game. However, some devices may not be compatible with the mod apk file, or may experience glitches or errors. Check the compatibility of your device before downloading and installing it.

3. Can I play Stick War Legacy mod apk android online?

   No, Stick War Legacy mod apk android is not an online game. You can only play it offline, on your own device. You cannot play it with other players or connect it to any social media platforms. You also cannot update the game or access any online features.

4. Can I uninstall Stick War Legacy mod apk android?

   Yes, you can uninstall Stick War Legacy mod apk android anytime you want. Go to your device settings, find the app in the list of installed apps, tap on it, and select the uninstall option. You can also delete the mod apk file from your device storage.

5. Where can I download Stick War Legacy mod apk android?

   You can download Stick War Legacy mod apk android from the link below. This is a trusted and reliable source that provides the latest and working version of the mod apk file. You can also find more information and reviews about the mod apk file on this site.
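
As promised in the first answer, here is a minimal backup sketch. It is illustrative only: the folder name is a placeholder, and real Android app data normally has to be exported through adb or the game's own cloud save rather than a simple directory copy.

```python
# Hypothetical pre-install backup: copy a save folder aside with a
# timestamped name. The source folder name is a placeholder; on a real
# device, app data usually needs adb or an in-game export instead.
import shutil
from datetime import datetime
from pathlib import Path

def backup_dir(src: str) -> Path:
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    dst = Path(f"{src}-backup-{stamp}")
    shutil.copytree(src, dst)  # fails if dst exists, which the stamp avoids
    return dst

if __name__ == "__main__":
    print(backup_dir("StickWarLegacy-save"))  # placeholder folder name
```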

      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Enjoy the Best Gaming Experience with Clash of Clans Mod Menu APK Latest Version.md b/spaces/1phancelerku/anime-remove-background/Enjoy the Best Gaming Experience with Clash of Clans Mod Menu APK Latest Version.md deleted file mode 100644 index fc892701f0396ebe5b57e043d6bc62c1849d3add..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Enjoy the Best Gaming Experience with Clash of Clans Mod Menu APK Latest Version.md +++ /dev/null @@ -1,151 +0,0 @@ - -

      Clash of Clans Mod Menu APK Latest Version: Everything You Need to Know


      If you are a fan of strategy games, you have probably heard of Clash of Clans, one of the most popular and addictive games in the world. But did you know that there is a way to make your gaming experience even more fun and exciting? Yes, we are talking about Clash of Clans Mod Menu APK, a modified version of the game that gives you unlimited resources and features. In this article, we will tell you everything you need to know about Clash of Clans Mod Menu APK latest version, including what it is, how to download and install it, how to use it, and more. So, let's get started!


      What is Clash of Clans?


      A brief introduction to the game


      Clash of Clans is a freemium mobile strategy game developed and published by Supercell, a Finnish game company. The game was released in 2012 for iOS and in 2013 for Android devices. The game has over 500 million downloads on Google Play Store and over 10 million ratings on App Store.


      clash of clans mod menu apk latest version


Download: https://jinyurl.com/2uNPFl




      The game is set in a fantasy world where you have to build your own village, train your troops, and fight against other players online. You can also join or create clans with other players and participate in clan wars, clan games, and special events. The game is updated regularly with new content and features.


      The main features of the game


      Some of the main features of Clash of Clans are:

• Build your own village with various buildings, defenses, traps, and decorations.
• Train different types of troops with unique abilities and upgrade them with elixir or dark elixir.
• Collect resources such as gold, elixir, dark elixir, gems, and clan points by raiding other villages or completing tasks.
• Use spells and siege machines to support your attacks or defend your village.
• Compete with other players in various leagues and tournaments for trophies and rewards.
• Join or create clans with other players and chat, donate troops, request reinforcements, and cooperate in clan wars, clan games, and special events.
• Explore new areas such as the Builder Base, the Town Hall 13, and the Super Troops.

      What is Clash of Clans Mod Menu APK?


      A modified version of the game with unlimited resources and features


      Clash of Clans Mod Menu APK is a modified version of the original game that gives you access to unlimited resources and features. With this mod menu apk, you can enjoy the game without any limitations or restrictions. You can build your village as you wish, train any troops you want, attack any base you like, and join any clan you prefer.


      Some of the unlimited resources and features that you can get with Clash of Clans Mod Menu APK are:

• Unlimited gold, elixir, dark elixir, gems, and clan points.

      The benefits of using the mod menu apk


      Some of the benefits of using Clash of Clans Mod Menu APK are:

• You can save your time and money. You don't have to spend hours or dollars to progress in the game. You can get everything you need for free and instantly.
• You can have more fun and excitement. You don't have to worry about running out of resources or losing battles. You can experiment with different strategies and tactics and enjoy the game to the fullest.
• You can challenge yourself and others. You don't have to settle for the easy or boring levels. You can try the harder or more interesting ones and test your skills and creativity. You can also compete with other players who use the mod menu apk and see who is the best.

      How to download and install Clash of Clans Mod Menu APK latest version?


      The steps to download and install the mod menu apk


      If you want to download and install Clash of Clans Mod Menu APK latest version, you need to follow these steps:

1. Go to a trusted website that provides the mod menu apk file, such as [clashofclansmodapk.net].
2. Click on the download button and wait for the file to be downloaded on your device (a scripted alternative is sketched after these steps).
3. Go to your device settings and enable the installation of apps from unknown sources.
4. Locate the downloaded file in your file manager and tap on it to start the installation process.
5. Follow the instructions on the screen and wait for the installation to be completed.
6. Launch the game and enjoy the mod menu apk.
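
For the scripted alternative mentioned in step 2, a download can be automated with Python's third-party `requests` package. The URL below is a deliberate placeholder (the real link lives on the download page), so treat this purely as a sketch of the streaming-download pattern:

```python
# Hypothetical scripted download of an APK; the URL is a placeholder, not
# the site's real link. Requires the third-party `requests` package.
import requests

URL = "https://example.com/coc-mod-menu.apk"  # placeholder URL
OUT = "coc-mod-menu.apk"

with requests.get(URL, stream=True, timeout=60) as resp:
    resp.raise_for_status()  # stop early on HTTP errors
    with open(OUT, "wb") as f:
        for chunk in resp.iter_content(chunk_size=1 << 20):  # 1 MiB chunks
            f.write(chunk)
print(f"downloaded {OUT}")
```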

      The precautions to take before installing the mod menu apk


      Before you install Clash of Clans Mod Menu APK latest version, you need to take some precautions to avoid any problems or risks. Here are some of them:

• Make sure that your device has enough storage space and battery life for the installation process.
• Make sure that you have a stable internet connection for the download process.
• Make sure that you download the mod menu apk file from a trusted and secure website, not from a random or suspicious one (one way to double-check the file is sketched below).
• Make sure that you back up your original game data before installing the mod menu apk, in case you want to switch back to the official version later.
• Make sure that you do not use your real account or personal information when playing with the mod menu apk, as it may result in a ban or a hack.
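
One concrete way to act on the "trusted and secure website" precaution, when the site publishes a checksum for its file, is to hash the download before installing it. A minimal sketch, with placeholder file name and digest:

```python
# Verify a downloaded APK against a published SHA-256 checksum before
# installing. Both the file name and the expected digest are placeholders.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

EXPECTED = "0" * 64  # placeholder: use the checksum from the download page
actual = sha256_of("coc-mod-menu.apk")  # placeholder file name
print("checksum OK" if actual == EXPECTED else "MISMATCH - do not install")
```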

      How to use Clash of Clans Mod Menu APK latest version?

      The main options and settings of the mod menu apk


      Once you launch the game with the mod menu apk, you will see a floating icon on the screen that gives you access to the mod menu. By tapping on this icon, you can open the mod menu and choose from various options and settings. Some of the main options and settings of the mod menu apk are:


      clash of clans mod menu apk unlimited everything
      -clash of clans mod menu apk download for android
      -clash of clans mod menu apk offline mode
      -clash of clans mod menu apk no root required
      -clash of clans mod menu apk with save data
      -clash of clans mod menu apk free gems and gold
      -clash of clans mod menu apk anti ban protection
      -clash of clans mod menu apk latest update 2023
      -clash of clans mod menu apk hack online generator
      -clash of clans mod menu apk private server switcher
      -clash of clans mod menu apk custom troops and buildings
      -clash of clans mod menu apk unlock all heroes and skins
      -clash of clans mod menu apk unlimited dark elixir and elixir
      -clash of clans mod menu apk supercell id login
      -clash of clans mod menu apk easy installation guide
      -clash of clans mod menu apk working on all devices
      -clash of clans mod menu apk best strategy and tips
      -clash of clans mod menu apk unlimited clan wars and events
      -clash of clans mod menu apk fast and secure download link
      -clash of clans mod menu apk no survey or human verification
      -clash of clans mod menu apk unlimited builder base resources
      -clash of clans mod menu apk support all android versions
      -clash of clans mod menu apk friendly and responsive user interface
      -clash of clans mod menu apk auto update feature
      -clash of clans mod menu apk unlimited town hall 14 features
      -clash of clans mod menu apk real time multiplayer battles
      -clash of clans mod menu apk unlimited spells and troops training
      -clash of clans mod menu apk no ads or pop ups
      -clash of clans mod menu apk high quality graphics and sound effects
      -clash of clans mod menu apk unlimited siege machines and wall wreckers
      -clash of clans mod menu apk unlock all achievements and rewards
      -clash of clans mod menu apk unlimited builder potions and books
      -clash of clans mod menu apk support all languages and regions
      -clash of clans mod menu apk 100% safe and virus free
      -clash of clans mod menu apk unlimited royal champion and grand warden abilities
      -clash of clans mod menu apk unlimited clan games and challenges
      -clash of clans mod menu apk unlock all clan perks and badges
      -clash of clans mod menu apk unlimited season pass and gold pass benefits
      -clash of clans mod menu apk unlimited magic items and hammers
      -clash of clans mod menu apk support all game modes and scenarios

      -
• Resources: You can adjust the amount of gold, elixir, dark elixir, gems, and clan points that you have in your account. You can also refill your resources whenever you want.
• Troops: You can select any troops that you want to train, even if they are not available at your town hall level or in your barracks. You can also change the level and quantity of your troops.
• Spells: You can select any spells that you want to use, even if they are not available at your town hall level or in your spell factory. You can also change the level and quantity of your spells.
• Siege Machines: You can select any siege machines that you want to use, even if they are not available at your town hall level or in your workshop. You can also change the level and quantity of your siege machines.
• Builder Base: You can access the Builder Base without any requirements or restrictions. You can also adjust the amount of gold, elixir, gems, and builder trophies that you have in your Builder Base account.
• Town Hall 13: You can access the Town Hall 13 without any requirements or restrictions. You can also upgrade your town hall, buildings, troops, spells, and siege machines to level 13.
• Super Troops: You can access the Super Troops without any requirements or restrictions. You can also select any Super Troops that you want to use and change their level and quantity.
• Customization: You can change your village name, clan name, clan badge, profile picture, and chat color as many times as you want.
• Cheats and Hacks: You can enable or disable various cheats and hacks that can enhance your gaming experience, such as auto-attack, auto-collect, auto-train, auto-upgrade, anti-ban, and more (a hypothetical sketch of such settings follows this list).
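
To make the scope of these toggles concrete, here is a purely hypothetical sketch of how such a menu's settings could be modeled as data. It is not the mod's real format or API; every field name is invented to mirror the options listed above.

```python
# Purely illustrative model of the mod-menu options above; not the mod's
# actual settings format. All field names are invented for this sketch.
from dataclasses import dataclass

@dataclass
class ModMenuSettings:
    # "Resources": adjustable currency amounts.
    gold: int = 999_999_999
    elixir: int = 999_999_999
    dark_elixir: int = 999_999_999
    gems: int = 999_999_999
    # "Cheats and Hacks": on/off toggles.
    auto_attack: bool = False
    auto_collect: bool = True
    anti_ban: bool = True
    # "Customization": free-form cosmetic values.
    village_name: str = "MyVillage"

if __name__ == "__main__":
    settings = ModMenuSettings(auto_attack=True)
    print(settings)
```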

      The tips and tricks to enjoy the mod menu apk


      Here are some tips and tricks that can help you enjoy Clash of Clans Mod Menu APK latest version:

• Use the mod menu apk for fun and entertainment only. Do not use it for malicious or illegal purposes.
• Do not abuse the mod menu apk or spam other players with it. Be respectful and fair to other players.
• Do not share your mod menu apk account or personal information with anyone. Keep your account safe and secure.
• Do not update the game from the official sources or uninstall the mod menu apk without backing up your data. You may lose your progress or data.
• Do not play with the mod menu apk on public or unsecured networks. You may expose your device or account to viruses or hackers.

      Conclusion


      A summary of the main points of the article


      In conclusion, Clash of Clans Mod Menu APK latest version is a modified version of the original game that gives you unlimited resources and features. With this mod menu apk, you can enjoy the game without any limitations or restrictions. You can build your village as you wish, train any troops you want, attack any base you like, and join any clan you prefer. You can also explore new areas such as the Builder Base, the Town Hall 13, and the Super Troops. You can also customize your village name, clan name, clan badge, profile picture, and chat color as many times as you want. You can also use various cheats and hacks to enhance your gaming experience.


      A call to action for the readers


      If you are interested in trying out Clash of Clans Mod Menu APK latest version, you can download it from [clashofclansmodapk.net] and follow the steps to install it on your device. However, before you do that, make sure that you take some precautions to avoid any problems or risks. Also, make sure that you use the mod menu apk for fun and entertainment only and not for malicious or illegal purposes. And remember to be respectful and fair to other players who play with the official version of the game.


      We hope that this article has given you all the information you need to know about Clash of Clans Mod Menu APK latest version. If you have any questions or feedback, feel free to leave a comment below. And if you liked this article, please share it with your friends who might be interested in Clash of Clans Mod Menu APK. And don't forget to check out our other articles on similar topics. Thank you for reading and happy gaming!


      FAQs


      Here are some of the frequently asked questions about Clash of Clans Mod Menu APK latest version:

| Question | Answer |
| --- | --- |
| Is Clash of Clans Mod Menu APK safe to use? | Clash of Clans Mod Menu APK is safe to use as long as you download it from a trusted and secure website, such as [clashofclansmodapk.net]. However, you should always be careful when installing apps from unknown sources and take some precautions to protect your device and account. |
| Is Clash of Clans Mod Menu APK legal to use? | Clash of Clans Mod Menu APK is not legal to use, as it violates the terms and conditions of the original game. Using the mod menu apk may result in a ban or a hack from the official game servers. Therefore, you should use the mod menu apk at your own risk and responsibility. |
| Can I play with the mod menu apk online? | Yes, you can play with the mod menu apk online, as it connects to the same servers as the official game. However, you may encounter some problems or errors when playing with the mod menu apk online, such as lagging, crashing, or mismatching. Also, you may face some backlash or complaints from other players who play with the official version of the game. |
| Can I play with the mod menu apk offline? | Yes, you can play with the mod menu apk offline, as it does not require an internet connection to run. However, you may miss some features or updates that are only available online, such as clan wars, clan games, and special events. |
| Can I switch back to the official version of the game after using the mod menu apk? | Yes, you can switch back to the official version of the game after using the mod menu apk, as long as you back up your original game data before installing the mod menu apk. To switch back, uninstall the mod menu apk and reinstall the original game from the official sources. |

\ No newline at end of file
diff --git a/spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/nets_537227KB.py b/spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/nets_537227KB.py
deleted file mode 100644
index 1ceac4a470ca311d594818d52e5f96919cfddb26..0000000000000000000000000000000000000000
--- a/spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/nets_537227KB.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import torch
-import numpy as np
-from torch import nn
-import torch.nn.functional as F
-
-from uvr5_pack.lib_v5 import layers_537238KB as layers
-
-
-class BaseASPPNet(nn.Module):
-    def __init__(self, nin, ch, dilations=(4, 8, 16)):
-        super(BaseASPPNet, self).__init__()
-        self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
-        self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
-        self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
-        self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
-
-        self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
-
-        self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
-        self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
-        self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
-        self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
-
-    def __call__(self, x):
-        h, e1 = self.enc1(x)
-        h, e2 = self.enc2(h)
-        h, e3 = self.enc3(h)
-        h, e4 = self.enc4(h)
-
-        h = self.aspp(h)
-
-        h = self.dec4(h, e4)
-        h = self.dec3(h, e3)
-        h = self.dec2(h, e2)
-        h = self.dec1(h, e1)
-
-        return h
-
-
-class CascadedASPPNet(nn.Module):
-    def __init__(self, n_fft):
-        super(CascadedASPPNet, self).__init__()
-        self.stg1_low_band_net = BaseASPPNet(2, 64)
-        self.stg1_high_band_net = BaseASPPNet(2, 64)
-
-        self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
-        self.stg2_full_band_net = BaseASPPNet(32, 64)
-
-        self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0)
-        self.stg3_full_band_net = BaseASPPNet(64, 128)
-
-        self.out = nn.Conv2d(128, 2, 1, bias=False)
-        self.aux1_out = nn.Conv2d(64, 2, 1, bias=False)
-        self.aux2_out = nn.Conv2d(64, 2, 1, bias=False)
-
-        self.max_bin = n_fft // 2
-        self.output_bin = n_fft // 2 + 1
-
-        self.offset = 128
-
-    def forward(self, x, aggressiveness=None):
-        mix = x.detach()
-        x = x.clone()
-
-        x = x[:, :, : self.max_bin]
-
-        bandw = x.size()[2] // 2
-        aux1 = torch.cat(
-            [
-                self.stg1_low_band_net(x[:, :, :bandw]),
-                self.stg1_high_band_net(x[:, :, bandw:]),
-            ],
-            dim=2,
-        )
-
-        h = torch.cat([x, aux1], dim=1)
-        aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
-
-        h = torch.cat([x, aux1, aux2], dim=1)
-        h = self.stg3_full_band_net(self.stg3_bridge(h))
-
-        mask = torch.sigmoid(self.out(h))
-        mask = F.pad(
-            input=mask,
-            pad=(0, 0, 0, self.output_bin - mask.size()[2]),
-            mode="replicate",
-        )
-
-        if self.training:
-            aux1 = torch.sigmoid(self.aux1_out(aux1))
-            aux1 = F.pad(
-                input=aux1,
-                pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
-                mode="replicate",
-            )
-            aux2 = torch.sigmoid(self.aux2_out(aux2))
-            aux2 = F.pad(
-                input=aux2,
-                pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
-                mode="replicate",
-            )
-            return mask * mix, aux1 * mix, aux2 * mix
-        else:
-            if aggressiveness:
-                mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
-                    mask[:, :, : aggressiveness["split_bin"]],
-                    1 + aggressiveness["value"] / 3,
-                )
-                mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
-                    mask[:, :, aggressiveness["split_bin"] :],
-                    1 + aggressiveness["value"],
-                )
-
-            return mask * mix
-
-    def predict(self, x_mag, aggressiveness=None):
-        h = self.forward(x_mag, aggressiveness)
-
-        if self.offset > 0:
-            h = h[:, :, :, self.offset : -self.offset]
-            assert h.size()[3] > 0
-
-        return h
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/grids/musicgen/_explorers.py b/spaces/AIConsultant/MusicGen/audiocraft/grids/musicgen/_explorers.py
deleted file mode 100644
index 334836b72559a120feb8a15eef3fe96ce88a4edb..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/grids/musicgen/_explorers.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import typing as tp
-
-import treetable as tt
-
-from .._base_explorers import BaseExplorer
-
-
-class LMExplorer(BaseExplorer):
-    eval_metrics: tp.List[str] = []
-
-    def stages(self) -> tp.List[str]:
-        return ['train', 'valid']
-
-    def get_grid_metrics(self):
-        """Return the metrics that should be displayed in the tracking table."""
-        return [
-            tt.group(
-                'train',
-                [
-                    tt.leaf('epoch'),
-                    tt.leaf('duration', '.1f'),  # duration in minutes
-                    tt.leaf('ping'),
-                    tt.leaf('ce', '.4f'),  # cross entropy
-                    tt.leaf("ppl", '.3f'),  # perplexity
-                ],
-                align='>',
-            ),
-            tt.group(
-                'valid',
-                [
-                    tt.leaf('ce', '.4f'),
-                    tt.leaf('ppl', '.3f'),
-                    tt.leaf('best_ppl', '.3f'),
-                ],
-                align='>',
-            ),
-        ]
-
-    def process_sheep(self, sheep, history):
-        parts = super().process_sheep(sheep, history)
-
-        track_by = {'ppl': 'lower'}  # values should be in ['lower', 'higher']
-        best_metrics = {k: (1 if v == 'lower' else -1) * float('inf') for k, v in track_by.items()}
-
-        def comparator(mode, a, b):
-            return a < b if mode == 'lower' else a > b
-
-        for metrics in history:
-            for key, sub in metrics.items():
-                for metric in track_by:
-                    # for the validation set, keep track of best metrics (ppl in this example)
-                    # this is so we can conveniently compare metrics between runs in the grid
-                    if key == 'valid' and metric in sub and comparator(
-                        track_by[metric], sub[metric], best_metrics[metric]
-                    ):
-                        best_metrics[metric] = sub[metric]
-
-        if 'valid' in parts:
-            parts['valid'].update({f'best_{k}': v for k, v in best_metrics.items()})
-        return parts
-
-
-class GenerationEvalExplorer(BaseExplorer):
-    eval_metrics: tp.List[str] = []
-
-    def stages(self) -> tp.List[str]:
-        return ['evaluate']
-
-    def get_grid_metrics(self):
-        """Return the metrics that should be displayed in the tracking table."""
-        return [
-            tt.group(
-                'evaluate',
-                [
-                    tt.leaf('epoch', '.3f'),
-                    tt.leaf('duration', '.1f'),
-                    tt.leaf('ping'),
-                    tt.leaf('ce', '.4f'),
-                    tt.leaf('ppl', '.3f'),
-                    tt.leaf('fad', '.3f'),
-                    tt.leaf('kld', '.3f'),
-                    tt.leaf('text_consistency', '.3f'),
-                    tt.leaf('chroma_cosine', '.3f'),
-                ],
-                align='>',
-            ),
-        ]
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192.py
deleted file mode 100644
index 7db9e0f047b8714df07776fc77880c9f5ca09478..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192.py
+++ /dev/null
@@ -1,2861 +0,0 @@
-default_scope = 'mmpose'
-default_hooks = dict(
-    timer=dict(type='IterTimerHook'),
-    logger=dict(type='LoggerHook',
interval=50), - param_scheduler=dict(type='ParamSchedulerHook'), - checkpoint=dict( - type='CheckpointHook', interval=10, save_best='PCK', rule='greater'), - sampler_seed=dict(type='DistSamplerSeedHook'), - visualization=dict(type='PoseVisualizationHook', enable=False)) -custom_hooks = [dict(type='SyncBuffersHook')] -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl')) -vis_backends = [dict(type='LocalVisBackend')] -visualizer = dict( - type='PoseLocalVisualizer', - vis_backends=[dict(type='LocalVisBackend'), - dict(type='WandbVisBackend')], - name='visualizer') -log_processor = dict( - type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) -log_level = 'INFO' -load_from = None -resume = False -backend_args = dict(backend='local') -train_cfg = dict(by_epoch=True, max_epochs=120, val_interval=10) -val_cfg = dict() -test_cfg = dict() -colors = dict( - sss=[255, 128, 0], - lss=[255, 0, 128], - sso=[128, 0, 255], - lso=[0, 128, 255], - vest=[0, 128, 128], - sling=[0, 0, 128], - shorts=[128, 128, 128], - trousers=[128, 0, 128], - skirt=[64, 128, 128], - ssd=[64, 64, 128], - lsd=[128, 64, 0], - vd=[128, 64, 255], - sd=[128, 64, 0]) -dataset_info = dict( - dataset_name='deepfashion2', - paper_info=dict( - author= - 'Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo', - title= - 'DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images', - container= - 'Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)', - year='2019', - homepage='https://github.com/switchablenorms/DeepFashion2'), - keypoint_info=dict({ - 0: - dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''), - 1: - dict( - name='sss_kpt2', - id=1, - color=[255, 128, 0], - type='', - swap='sss_kpt6'), - 2: - dict( - name='sss_kpt3', - id=2, - color=[255, 128, 0], - type='', - swap='sss_kpt5'), - 3: - dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''), - 4: - dict( - name='sss_kpt5', - id=4, - color=[255, 128, 0], - type='', - swap='sss_kpt3'), - 5: - dict( - name='sss_kpt6', - id=5, - color=[255, 128, 0], - type='', - swap='sss_kpt2'), - 6: - dict( - name='sss_kpt7', - id=6, - color=[255, 128, 0], - type='', - swap='sss_kpt25'), - 7: - dict( - name='sss_kpt8', - id=7, - color=[255, 128, 0], - type='', - swap='sss_kpt24'), - 8: - dict( - name='sss_kpt9', - id=8, - color=[255, 128, 0], - type='', - swap='sss_kpt23'), - 9: - dict( - name='sss_kpt10', - id=9, - color=[255, 128, 0], - type='', - swap='sss_kpt22'), - 10: - dict( - name='sss_kpt11', - id=10, - color=[255, 128, 0], - type='', - swap='sss_kpt21'), - 11: - dict( - name='sss_kpt12', - id=11, - color=[255, 128, 0], - type='', - swap='sss_kpt20'), - 12: - dict( - name='sss_kpt13', - id=12, - color=[255, 128, 0], - type='', - swap='sss_kpt19'), - 13: - dict( - name='sss_kpt14', - id=13, - color=[255, 128, 0], - type='', - swap='sss_kpt18'), - 14: - dict( - name='sss_kpt15', - id=14, - color=[255, 128, 0], - type='', - swap='sss_kpt17'), - 15: - dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''), - 16: - dict( - name='sss_kpt17', - id=16, - color=[255, 128, 0], - type='', - swap='sss_kpt15'), - 17: - dict( - name='sss_kpt18', - id=17, - color=[255, 128, 0], - type='', - swap='sss_kpt14'), - 18: - dict( - name='sss_kpt19', - id=18, - color=[255, 128, 0], - type='', - swap='sss_kpt13'), - 19: - dict( - name='sss_kpt20', - 
id=19, - color=[255, 128, 0], - type='', - swap='sss_kpt12'), - 20: - dict( - name='sss_kpt21', - id=20, - color=[255, 128, 0], - type='', - swap='sss_kpt11'), - 21: - dict( - name='sss_kpt22', - id=21, - color=[255, 128, 0], - type='', - swap='sss_kpt10'), - 22: - dict( - name='sss_kpt23', - id=22, - color=[255, 128, 0], - type='', - swap='sss_kpt9'), - 23: - dict( - name='sss_kpt24', - id=23, - color=[255, 128, 0], - type='', - swap='sss_kpt8'), - 24: - dict( - name='sss_kpt25', - id=24, - color=[255, 128, 0], - type='', - swap='sss_kpt7'), - 25: - dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''), - 26: - dict( - name='lss_kpt2', - id=26, - color=[255, 0, 128], - type='', - swap='lss_kpt6'), - 27: - dict( - name='lss_kpt3', - id=27, - color=[255, 0, 128], - type='', - swap='lss_kpt5'), - 28: - dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''), - 29: - dict( - name='lss_kpt5', - id=29, - color=[255, 0, 128], - type='', - swap='lss_kpt3'), - 30: - dict( - name='lss_kpt6', - id=30, - color=[255, 0, 128], - type='', - swap='lss_kpt2'), - 31: - dict( - name='lss_kpt7', - id=31, - color=[255, 0, 128], - type='', - swap='lss_kpt33'), - 32: - dict( - name='lss_kpt8', - id=32, - color=[255, 0, 128], - type='', - swap='lss_kpt32'), - 33: - dict( - name='lss_kpt9', - id=33, - color=[255, 0, 128], - type='', - swap='lss_kpt31'), - 34: - dict( - name='lss_kpt10', - id=34, - color=[255, 0, 128], - type='', - swap='lss_kpt30'), - 35: - dict( - name='lss_kpt11', - id=35, - color=[255, 0, 128], - type='', - swap='lss_kpt29'), - 36: - dict( - name='lss_kpt12', - id=36, - color=[255, 0, 128], - type='', - swap='lss_kpt28'), - 37: - dict( - name='lss_kpt13', - id=37, - color=[255, 0, 128], - type='', - swap='lss_kpt27'), - 38: - dict( - name='lss_kpt14', - id=38, - color=[255, 0, 128], - type='', - swap='lss_kpt26'), - 39: - dict( - name='lss_kpt15', - id=39, - color=[255, 0, 128], - type='', - swap='lss_kpt25'), - 40: - dict( - name='lss_kpt16', - id=40, - color=[255, 0, 128], - type='', - swap='lss_kpt24'), - 41: - dict( - name='lss_kpt17', - id=41, - color=[255, 0, 128], - type='', - swap='lss_kpt23'), - 42: - dict( - name='lss_kpt18', - id=42, - color=[255, 0, 128], - type='', - swap='lss_kpt22'), - 43: - dict( - name='lss_kpt19', - id=43, - color=[255, 0, 128], - type='', - swap='lss_kpt21'), - 44: - dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''), - 45: - dict( - name='lss_kpt21', - id=45, - color=[255, 0, 128], - type='', - swap='lss_kpt19'), - 46: - dict( - name='lss_kpt22', - id=46, - color=[255, 0, 128], - type='', - swap='lss_kpt18'), - 47: - dict( - name='lss_kpt23', - id=47, - color=[255, 0, 128], - type='', - swap='lss_kpt17'), - 48: - dict( - name='lss_kpt24', - id=48, - color=[255, 0, 128], - type='', - swap='lss_kpt16'), - 49: - dict( - name='lss_kpt25', - id=49, - color=[255, 0, 128], - type='', - swap='lss_kpt15'), - 50: - dict( - name='lss_kpt26', - id=50, - color=[255, 0, 128], - type='', - swap='lss_kpt14'), - 51: - dict( - name='lss_kpt27', - id=51, - color=[255, 0, 128], - type='', - swap='lss_kpt13'), - 52: - dict( - name='lss_kpt28', - id=52, - color=[255, 0, 128], - type='', - swap='lss_kpt12'), - 53: - dict( - name='lss_kpt29', - id=53, - color=[255, 0, 128], - type='', - swap='lss_kpt11'), - 54: - dict( - name='lss_kpt30', - id=54, - color=[255, 0, 128], - type='', - swap='lss_kpt10'), - 55: - dict( - name='lss_kpt31', - id=55, - color=[255, 0, 128], - type='', - swap='lss_kpt9'), - 56: - dict( - name='lss_kpt32', - 
id=56, - color=[255, 0, 128], - type='', - swap='lss_kpt8'), - 57: - dict( - name='lss_kpt33', - id=57, - color=[255, 0, 128], - type='', - swap='lss_kpt7'), - 58: - dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''), - 59: - dict( - name='sso_kpt2', - id=59, - color=[128, 0, 255], - type='', - swap='sso_kpt26'), - 60: - dict( - name='sso_kpt3', - id=60, - color=[128, 0, 255], - type='', - swap='sso_kpt5'), - 61: - dict( - name='sso_kpt4', - id=61, - color=[128, 0, 255], - type='', - swap='sso_kpt6'), - 62: - dict( - name='sso_kpt5', - id=62, - color=[128, 0, 255], - type='', - swap='sso_kpt3'), - 63: - dict( - name='sso_kpt6', - id=63, - color=[128, 0, 255], - type='', - swap='sso_kpt4'), - 64: - dict( - name='sso_kpt7', - id=64, - color=[128, 0, 255], - type='', - swap='sso_kpt25'), - 65: - dict( - name='sso_kpt8', - id=65, - color=[128, 0, 255], - type='', - swap='sso_kpt24'), - 66: - dict( - name='sso_kpt9', - id=66, - color=[128, 0, 255], - type='', - swap='sso_kpt23'), - 67: - dict( - name='sso_kpt10', - id=67, - color=[128, 0, 255], - type='', - swap='sso_kpt22'), - 68: - dict( - name='sso_kpt11', - id=68, - color=[128, 0, 255], - type='', - swap='sso_kpt21'), - 69: - dict( - name='sso_kpt12', - id=69, - color=[128, 0, 255], - type='', - swap='sso_kpt20'), - 70: - dict( - name='sso_kpt13', - id=70, - color=[128, 0, 255], - type='', - swap='sso_kpt19'), - 71: - dict( - name='sso_kpt14', - id=71, - color=[128, 0, 255], - type='', - swap='sso_kpt18'), - 72: - dict( - name='sso_kpt15', - id=72, - color=[128, 0, 255], - type='', - swap='sso_kpt17'), - 73: - dict( - name='sso_kpt16', - id=73, - color=[128, 0, 255], - type='', - swap='sso_kpt29'), - 74: - dict( - name='sso_kpt17', - id=74, - color=[128, 0, 255], - type='', - swap='sso_kpt15'), - 75: - dict( - name='sso_kpt18', - id=75, - color=[128, 0, 255], - type='', - swap='sso_kpt14'), - 76: - dict( - name='sso_kpt19', - id=76, - color=[128, 0, 255], - type='', - swap='sso_kpt13'), - 77: - dict( - name='sso_kpt20', - id=77, - color=[128, 0, 255], - type='', - swap='sso_kpt12'), - 78: - dict( - name='sso_kpt21', - id=78, - color=[128, 0, 255], - type='', - swap='sso_kpt11'), - 79: - dict( - name='sso_kpt22', - id=79, - color=[128, 0, 255], - type='', - swap='sso_kpt10'), - 80: - dict( - name='sso_kpt23', - id=80, - color=[128, 0, 255], - type='', - swap='sso_kpt9'), - 81: - dict( - name='sso_kpt24', - id=81, - color=[128, 0, 255], - type='', - swap='sso_kpt8'), - 82: - dict( - name='sso_kpt25', - id=82, - color=[128, 0, 255], - type='', - swap='sso_kpt7'), - 83: - dict( - name='sso_kpt26', - id=83, - color=[128, 0, 255], - type='', - swap='sso_kpt2'), - 84: - dict( - name='sso_kpt27', - id=84, - color=[128, 0, 255], - type='', - swap='sso_kpt30'), - 85: - dict( - name='sso_kpt28', - id=85, - color=[128, 0, 255], - type='', - swap='sso_kpt31'), - 86: - dict( - name='sso_kpt29', - id=86, - color=[128, 0, 255], - type='', - swap='sso_kpt16'), - 87: - dict( - name='sso_kpt30', - id=87, - color=[128, 0, 255], - type='', - swap='sso_kpt27'), - 88: - dict( - name='sso_kpt31', - id=88, - color=[128, 0, 255], - type='', - swap='sso_kpt28'), - 89: - dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''), - 90: - dict( - name='lso_kpt2', - id=90, - color=[0, 128, 255], - type='', - swap='lso_kpt6'), - 91: - dict( - name='lso_kpt3', - id=91, - color=[0, 128, 255], - type='', - swap='lso_kpt5'), - 92: - dict( - name='lso_kpt4', - id=92, - color=[0, 128, 255], - type='', - swap='lso_kpt34'), - 93: - dict( - 
name='lso_kpt5', - id=93, - color=[0, 128, 255], - type='', - swap='lso_kpt3'), - 94: - dict( - name='lso_kpt6', - id=94, - color=[0, 128, 255], - type='', - swap='lso_kpt2'), - 95: - dict( - name='lso_kpt7', - id=95, - color=[0, 128, 255], - type='', - swap='lso_kpt33'), - 96: - dict( - name='lso_kpt8', - id=96, - color=[0, 128, 255], - type='', - swap='lso_kpt32'), - 97: - dict( - name='lso_kpt9', - id=97, - color=[0, 128, 255], - type='', - swap='lso_kpt31'), - 98: - dict( - name='lso_kpt10', - id=98, - color=[0, 128, 255], - type='', - swap='lso_kpt30'), - 99: - dict( - name='lso_kpt11', - id=99, - color=[0, 128, 255], - type='', - swap='lso_kpt29'), - 100: - dict( - name='lso_kpt12', - id=100, - color=[0, 128, 255], - type='', - swap='lso_kpt28'), - 101: - dict( - name='lso_kpt13', - id=101, - color=[0, 128, 255], - type='', - swap='lso_kpt27'), - 102: - dict( - name='lso_kpt14', - id=102, - color=[0, 128, 255], - type='', - swap='lso_kpt26'), - 103: - dict( - name='lso_kpt15', - id=103, - color=[0, 128, 255], - type='', - swap='lso_kpt25'), - 104: - dict( - name='lso_kpt16', - id=104, - color=[0, 128, 255], - type='', - swap='lso_kpt24'), - 105: - dict( - name='lso_kpt17', - id=105, - color=[0, 128, 255], - type='', - swap='lso_kpt23'), - 106: - dict( - name='lso_kpt18', - id=106, - color=[0, 128, 255], - type='', - swap='lso_kpt22'), - 107: - dict( - name='lso_kpt19', - id=107, - color=[0, 128, 255], - type='', - swap='lso_kpt21'), - 108: - dict( - name='lso_kpt20', - id=108, - color=[0, 128, 255], - type='', - swap='lso_kpt37'), - 109: - dict( - name='lso_kpt21', - id=109, - color=[0, 128, 255], - type='', - swap='lso_kpt19'), - 110: - dict( - name='lso_kpt22', - id=110, - color=[0, 128, 255], - type='', - swap='lso_kpt18'), - 111: - dict( - name='lso_kpt23', - id=111, - color=[0, 128, 255], - type='', - swap='lso_kpt17'), - 112: - dict( - name='lso_kpt24', - id=112, - color=[0, 128, 255], - type='', - swap='lso_kpt16'), - 113: - dict( - name='lso_kpt25', - id=113, - color=[0, 128, 255], - type='', - swap='lso_kpt15'), - 114: - dict( - name='lso_kpt26', - id=114, - color=[0, 128, 255], - type='', - swap='lso_kpt14'), - 115: - dict( - name='lso_kpt27', - id=115, - color=[0, 128, 255], - type='', - swap='lso_kpt13'), - 116: - dict( - name='lso_kpt28', - id=116, - color=[0, 128, 255], - type='', - swap='lso_kpt12'), - 117: - dict( - name='lso_kpt29', - id=117, - color=[0, 128, 255], - type='', - swap='lso_kpt11'), - 118: - dict( - name='lso_kpt30', - id=118, - color=[0, 128, 255], - type='', - swap='lso_kpt10'), - 119: - dict( - name='lso_kpt31', - id=119, - color=[0, 128, 255], - type='', - swap='lso_kpt9'), - 120: - dict( - name='lso_kpt32', - id=120, - color=[0, 128, 255], - type='', - swap='lso_kpt8'), - 121: - dict( - name='lso_kpt33', - id=121, - color=[0, 128, 255], - type='', - swap='lso_kpt7'), - 122: - dict( - name='lso_kpt34', - id=122, - color=[0, 128, 255], - type='', - swap='lso_kpt4'), - 123: - dict( - name='lso_kpt35', - id=123, - color=[0, 128, 255], - type='', - swap='lso_kpt38'), - 124: - dict( - name='lso_kpt36', - id=124, - color=[0, 128, 255], - type='', - swap='lso_kpt39'), - 125: - dict( - name='lso_kpt37', - id=125, - color=[0, 128, 255], - type='', - swap='lso_kpt20'), - 126: - dict( - name='lso_kpt38', - id=126, - color=[0, 128, 255], - type='', - swap='lso_kpt35'), - 127: - dict( - name='lso_kpt39', - id=127, - color=[0, 128, 255], - type='', - swap='lso_kpt36'), - 128: - dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''), - 129: - dict( 
- name='vest_kpt2', - id=129, - color=[0, 128, 128], - type='', - swap='vest_kpt6'), - 130: - dict( - name='vest_kpt3', - id=130, - color=[0, 128, 128], - type='', - swap='vest_kpt5'), - 131: - dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''), - 132: - dict( - name='vest_kpt5', - id=132, - color=[0, 128, 128], - type='', - swap='vest_kpt3'), - 133: - dict( - name='vest_kpt6', - id=133, - color=[0, 128, 128], - type='', - swap='vest_kpt2'), - 134: - dict( - name='vest_kpt7', - id=134, - color=[0, 128, 128], - type='', - swap='vest_kpt15'), - 135: - dict( - name='vest_kpt8', - id=135, - color=[0, 128, 128], - type='', - swap='vest_kpt14'), - 136: - dict( - name='vest_kpt9', - id=136, - color=[0, 128, 128], - type='', - swap='vest_kpt13'), - 137: - dict( - name='vest_kpt10', - id=137, - color=[0, 128, 128], - type='', - swap='vest_kpt12'), - 138: - dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''), - 139: - dict( - name='vest_kpt12', - id=139, - color=[0, 128, 128], - type='', - swap='vest_kpt10'), - 140: - dict(name='vest_kpt13', id=140, color=[0, 128, 128], type='', swap=''), - 141: - dict( - name='vest_kpt14', - id=141, - color=[0, 128, 128], - type='', - swap='vest_kpt8'), - 142: - dict( - name='vest_kpt15', - id=142, - color=[0, 128, 128], - type='', - swap='vest_kpt7'), - 143: - dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''), - 144: - dict( - name='sling_kpt2', - id=144, - color=[0, 0, 128], - type='', - swap='sling_kpt6'), - 145: - dict( - name='sling_kpt3', - id=145, - color=[0, 0, 128], - type='', - swap='sling_kpt5'), - 146: - dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''), - 147: - dict( - name='sling_kpt5', - id=147, - color=[0, 0, 128], - type='', - swap='sling_kpt3'), - 148: - dict( - name='sling_kpt6', - id=148, - color=[0, 0, 128], - type='', - swap='sling_kpt2'), - 149: - dict( - name='sling_kpt7', - id=149, - color=[0, 0, 128], - type='', - swap='sling_kpt15'), - 150: - dict( - name='sling_kpt8', - id=150, - color=[0, 0, 128], - type='', - swap='sling_kpt14'), - 151: - dict( - name='sling_kpt9', - id=151, - color=[0, 0, 128], - type='', - swap='sling_kpt13'), - 152: - dict( - name='sling_kpt10', - id=152, - color=[0, 0, 128], - type='', - swap='sling_kpt12'), - 153: - dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', swap=''), - 154: - dict( - name='sling_kpt12', - id=154, - color=[0, 0, 128], - type='', - swap='sling_kpt10'), - 155: - dict( - name='sling_kpt13', - id=155, - color=[0, 0, 128], - type='', - swap='sling_kpt9'), - 156: - dict( - name='sling_kpt14', - id=156, - color=[0, 0, 128], - type='', - swap='sling_kpt8'), - 157: - dict( - name='sling_kpt15', - id=157, - color=[0, 0, 128], - type='', - swap='sling_kpt7'), - 158: - dict( - name='shorts_kpt1', - id=158, - color=[128, 128, 128], - type='', - swap='shorts_kpt3'), - 159: - dict( - name='shorts_kpt2', - id=159, - color=[128, 128, 128], - type='', - swap=''), - 160: - dict( - name='shorts_kpt3', - id=160, - color=[128, 128, 128], - type='', - swap='shorts_kpt1'), - 161: - dict( - name='shorts_kpt4', - id=161, - color=[128, 128, 128], - type='', - swap='shorts_kpt10'), - 162: - dict( - name='shorts_kpt5', - id=162, - color=[128, 128, 128], - type='', - swap='shorts_kpt9'), - 163: - dict( - name='shorts_kpt6', - id=163, - color=[128, 128, 128], - type='', - swap='shorts_kpt8'), - 164: - dict( - name='shorts_kpt7', - id=164, - color=[128, 128, 128], - type='', - swap=''), - 165: - dict( - name='shorts_kpt8', - id=165, - 
color=[128, 128, 128], - type='', - swap='shorts_kpt6'), - 166: - dict( - name='shorts_kpt9', - id=166, - color=[128, 128, 128], - type='', - swap='shorts_kpt5'), - 167: - dict( - name='shorts_kpt10', - id=167, - color=[128, 128, 128], - type='', - swap='shorts_kpt4'), - 168: - dict( - name='trousers_kpt1', - id=168, - color=[128, 0, 128], - type='', - swap='trousers_kpt3'), - 169: - dict( - name='trousers_kpt2', - id=169, - color=[128, 0, 128], - type='', - swap=''), - 170: - dict( - name='trousers_kpt3', - id=170, - color=[128, 0, 128], - type='', - swap='trousers_kpt1'), - 171: - dict( - name='trousers_kpt4', - id=171, - color=[128, 0, 128], - type='', - swap='trousers_kpt14'), - 172: - dict( - name='trousers_kpt5', - id=172, - color=[128, 0, 128], - type='', - swap='trousers_kpt13'), - 173: - dict( - name='trousers_kpt6', - id=173, - color=[128, 0, 128], - type='', - swap='trousers_kpt12'), - 174: - dict( - name='trousers_kpt7', - id=174, - color=[128, 0, 128], - type='', - swap='trousers_kpt11'), - 175: - dict( - name='trousers_kpt8', - id=175, - color=[128, 0, 128], - type='', - swap='trousers_kpt10'), - 176: - dict( - name='trousers_kpt9', - id=176, - color=[128, 0, 128], - type='', - swap=''), - 177: - dict( - name='trousers_kpt10', - id=177, - color=[128, 0, 128], - type='', - swap='trousers_kpt8'), - 178: - dict( - name='trousers_kpt11', - id=178, - color=[128, 0, 128], - type='', - swap='trousers_kpt7'), - 179: - dict( - name='trousers_kpt12', - id=179, - color=[128, 0, 128], - type='', - swap='trousers_kpt6'), - 180: - dict( - name='trousers_kpt13', - id=180, - color=[128, 0, 128], - type='', - swap='trousers_kpt5'), - 181: - dict( - name='trousers_kpt14', - id=181, - color=[128, 0, 128], - type='', - swap='trousers_kpt4'), - 182: - dict( - name='skirt_kpt1', - id=182, - color=[64, 128, 128], - type='', - swap='skirt_kpt3'), - 183: - dict( - name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''), - 184: - dict( - name='skirt_kpt3', - id=184, - color=[64, 128, 128], - type='', - swap='skirt_kpt1'), - 185: - dict( - name='skirt_kpt4', - id=185, - color=[64, 128, 128], - type='', - swap='skirt_kpt8'), - 186: - dict( - name='skirt_kpt5', - id=186, - color=[64, 128, 128], - type='', - swap='skirt_kpt7'), - 187: - dict( - name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''), - 188: - dict( - name='skirt_kpt7', - id=188, - color=[64, 128, 128], - type='', - swap='skirt_kpt5'), - 189: - dict( - name='skirt_kpt8', - id=189, - color=[64, 128, 128], - type='', - swap='skirt_kpt4'), - 190: - dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''), - 191: - dict( - name='ssd_kpt2', - id=191, - color=[64, 64, 128], - type='', - swap='ssd_kpt6'), - 192: - dict( - name='ssd_kpt3', - id=192, - color=[64, 64, 128], - type='', - swap='ssd_kpt5'), - 193: - dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''), - 194: - dict( - name='ssd_kpt5', - id=194, - color=[64, 64, 128], - type='', - swap='ssd_kpt3'), - 195: - dict( - name='ssd_kpt6', - id=195, - color=[64, 64, 128], - type='', - swap='ssd_kpt2'), - 196: - dict( - name='ssd_kpt7', - id=196, - color=[64, 64, 128], - type='', - swap='ssd_kpt29'), - 197: - dict( - name='ssd_kpt8', - id=197, - color=[64, 64, 128], - type='', - swap='ssd_kpt28'), - 198: - dict( - name='ssd_kpt9', - id=198, - color=[64, 64, 128], - type='', - swap='ssd_kpt27'), - 199: - dict( - name='ssd_kpt10', - id=199, - color=[64, 64, 128], - type='', - swap='ssd_kpt26'), - 200: - dict( - name='ssd_kpt11', - id=200, - 
color=[64, 64, 128], - type='', - swap='ssd_kpt25'), - 201: - dict( - name='ssd_kpt12', - id=201, - color=[64, 64, 128], - type='', - swap='ssd_kpt24'), - 202: - dict( - name='ssd_kpt13', - id=202, - color=[64, 64, 128], - type='', - swap='ssd_kpt23'), - 203: - dict( - name='ssd_kpt14', - id=203, - color=[64, 64, 128], - type='', - swap='ssd_kpt22'), - 204: - dict( - name='ssd_kpt15', - id=204, - color=[64, 64, 128], - type='', - swap='ssd_kpt21'), - 205: - dict( - name='ssd_kpt16', - id=205, - color=[64, 64, 128], - type='', - swap='ssd_kpt20'), - 206: - dict( - name='ssd_kpt17', - id=206, - color=[64, 64, 128], - type='', - swap='ssd_kpt19'), - 207: - dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''), - 208: - dict( - name='ssd_kpt19', - id=208, - color=[64, 64, 128], - type='', - swap='ssd_kpt17'), - 209: - dict( - name='ssd_kpt20', - id=209, - color=[64, 64, 128], - type='', - swap='ssd_kpt16'), - 210: - dict( - name='ssd_kpt21', - id=210, - color=[64, 64, 128], - type='', - swap='ssd_kpt15'), - 211: - dict( - name='ssd_kpt22', - id=211, - color=[64, 64, 128], - type='', - swap='ssd_kpt14'), - 212: - dict( - name='ssd_kpt23', - id=212, - color=[64, 64, 128], - type='', - swap='ssd_kpt13'), - 213: - dict( - name='ssd_kpt24', - id=213, - color=[64, 64, 128], - type='', - swap='ssd_kpt12'), - 214: - dict( - name='ssd_kpt25', - id=214, - color=[64, 64, 128], - type='', - swap='ssd_kpt11'), - 215: - dict( - name='ssd_kpt26', - id=215, - color=[64, 64, 128], - type='', - swap='ssd_kpt10'), - 216: - dict( - name='ssd_kpt27', - id=216, - color=[64, 64, 128], - type='', - swap='ssd_kpt9'), - 217: - dict( - name='ssd_kpt28', - id=217, - color=[64, 64, 128], - type='', - swap='ssd_kpt8'), - 218: - dict( - name='ssd_kpt29', - id=218, - color=[64, 64, 128], - type='', - swap='ssd_kpt7'), - 219: - dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''), - 220: - dict( - name='lsd_kpt2', - id=220, - color=[128, 64, 0], - type='', - swap='lsd_kpt6'), - 221: - dict( - name='lsd_kpt3', - id=221, - color=[128, 64, 0], - type='', - swap='lsd_kpt5'), - 222: - dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''), - 223: - dict( - name='lsd_kpt5', - id=223, - color=[128, 64, 0], - type='', - swap='lsd_kpt3'), - 224: - dict( - name='lsd_kpt6', - id=224, - color=[128, 64, 0], - type='', - swap='lsd_kpt2'), - 225: - dict( - name='lsd_kpt7', - id=225, - color=[128, 64, 0], - type='', - swap='lsd_kpt37'), - 226: - dict( - name='lsd_kpt8', - id=226, - color=[128, 64, 0], - type='', - swap='lsd_kpt36'), - 227: - dict( - name='lsd_kpt9', - id=227, - color=[128, 64, 0], - type='', - swap='lsd_kpt35'), - 228: - dict( - name='lsd_kpt10', - id=228, - color=[128, 64, 0], - type='', - swap='lsd_kpt34'), - 229: - dict( - name='lsd_kpt11', - id=229, - color=[128, 64, 0], - type='', - swap='lsd_kpt33'), - 230: - dict( - name='lsd_kpt12', - id=230, - color=[128, 64, 0], - type='', - swap='lsd_kpt32'), - 231: - dict( - name='lsd_kpt13', - id=231, - color=[128, 64, 0], - type='', - swap='lsd_kpt31'), - 232: - dict( - name='lsd_kpt14', - id=232, - color=[128, 64, 0], - type='', - swap='lsd_kpt30'), - 233: - dict( - name='lsd_kpt15', - id=233, - color=[128, 64, 0], - type='', - swap='lsd_kpt29'), - 234: - dict( - name='lsd_kpt16', - id=234, - color=[128, 64, 0], - type='', - swap='lsd_kpt28'), - 235: - dict( - name='lsd_kpt17', - id=235, - color=[128, 64, 0], - type='', - swap='lsd_kpt27'), - 236: - dict( - name='lsd_kpt18', - id=236, - color=[128, 64, 0], - type='', - 
swap='lsd_kpt26'), - 237: - dict( - name='lsd_kpt19', - id=237, - color=[128, 64, 0], - type='', - swap='lsd_kpt25'), - 238: - dict( - name='lsd_kpt20', - id=238, - color=[128, 64, 0], - type='', - swap='lsd_kpt24'), - 239: - dict( - name='lsd_kpt21', - id=239, - color=[128, 64, 0], - type='', - swap='lsd_kpt23'), - 240: - dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''), - 241: - dict( - name='lsd_kpt23', - id=241, - color=[128, 64, 0], - type='', - swap='lsd_kpt21'), - 242: - dict( - name='lsd_kpt24', - id=242, - color=[128, 64, 0], - type='', - swap='lsd_kpt20'), - 243: - dict( - name='lsd_kpt25', - id=243, - color=[128, 64, 0], - type='', - swap='lsd_kpt19'), - 244: - dict( - name='lsd_kpt26', - id=244, - color=[128, 64, 0], - type='', - swap='lsd_kpt18'), - 245: - dict( - name='lsd_kpt27', - id=245, - color=[128, 64, 0], - type='', - swap='lsd_kpt17'), - 246: - dict( - name='lsd_kpt28', - id=246, - color=[128, 64, 0], - type='', - swap='lsd_kpt16'), - 247: - dict( - name='lsd_kpt29', - id=247, - color=[128, 64, 0], - type='', - swap='lsd_kpt15'), - 248: - dict( - name='lsd_kpt30', - id=248, - color=[128, 64, 0], - type='', - swap='lsd_kpt14'), - 249: - dict( - name='lsd_kpt31', - id=249, - color=[128, 64, 0], - type='', - swap='lsd_kpt13'), - 250: - dict( - name='lsd_kpt32', - id=250, - color=[128, 64, 0], - type='', - swap='lsd_kpt12'), - 251: - dict( - name='lsd_kpt33', - id=251, - color=[128, 64, 0], - type='', - swap='lsd_kpt11'), - 252: - dict( - name='lsd_kpt34', - id=252, - color=[128, 64, 0], - type='', - swap='lsd_kpt10'), - 253: - dict( - name='lsd_kpt35', - id=253, - color=[128, 64, 0], - type='', - swap='lsd_kpt9'), - 254: - dict( - name='lsd_kpt36', - id=254, - color=[128, 64, 0], - type='', - swap='lsd_kpt8'), - 255: - dict( - name='lsd_kpt37', - id=255, - color=[128, 64, 0], - type='', - swap='lsd_kpt7'), - 256: - dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''), - 257: - dict( - name='vd_kpt2', - id=257, - color=[128, 64, 255], - type='', - swap='vd_kpt6'), - 258: - dict( - name='vd_kpt3', - id=258, - color=[128, 64, 255], - type='', - swap='vd_kpt5'), - 259: - dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''), - 260: - dict( - name='vd_kpt5', - id=260, - color=[128, 64, 255], - type='', - swap='vd_kpt3'), - 261: - dict( - name='vd_kpt6', - id=261, - color=[128, 64, 255], - type='', - swap='vd_kpt2'), - 262: - dict( - name='vd_kpt7', - id=262, - color=[128, 64, 255], - type='', - swap='vd_kpt19'), - 263: - dict( - name='vd_kpt8', - id=263, - color=[128, 64, 255], - type='', - swap='vd_kpt18'), - 264: - dict( - name='vd_kpt9', - id=264, - color=[128, 64, 255], - type='', - swap='vd_kpt17'), - 265: - dict( - name='vd_kpt10', - id=265, - color=[128, 64, 255], - type='', - swap='vd_kpt16'), - 266: - dict( - name='vd_kpt11', - id=266, - color=[128, 64, 255], - type='', - swap='vd_kpt15'), - 267: - dict( - name='vd_kpt12', - id=267, - color=[128, 64, 255], - type='', - swap='vd_kpt14'), - 268: - dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''), - 269: - dict( - name='vd_kpt14', - id=269, - color=[128, 64, 255], - type='', - swap='vd_kpt12'), - 270: - dict( - name='vd_kpt15', - id=270, - color=[128, 64, 255], - type='', - swap='vd_kpt11'), - 271: - dict( - name='vd_kpt16', - id=271, - color=[128, 64, 255], - type='', - swap='vd_kpt10'), - 272: - dict( - name='vd_kpt17', - id=272, - color=[128, 64, 255], - type='', - swap='vd_kpt9'), - 273: - dict( - name='vd_kpt18', - id=273, - color=[128, 64, 
255], - type='', - swap='vd_kpt8'), - 274: - dict( - name='vd_kpt19', - id=274, - color=[128, 64, 255], - type='', - swap='vd_kpt7'), - 275: - dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''), - 276: - dict( - name='sd_kpt2', - id=276, - color=[128, 64, 0], - type='', - swap='sd_kpt6'), - 277: - dict( - name='sd_kpt3', - id=277, - color=[128, 64, 0], - type='', - swap='sd_kpt5'), - 278: - dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''), - 279: - dict( - name='sd_kpt5', - id=279, - color=[128, 64, 0], - type='', - swap='sd_kpt3'), - 280: - dict( - name='sd_kpt6', - id=280, - color=[128, 64, 0], - type='', - swap='sd_kpt2'), - 281: - dict( - name='sd_kpt7', - id=281, - color=[128, 64, 0], - type='', - swap='sd_kpt19'), - 282: - dict( - name='sd_kpt8', - id=282, - color=[128, 64, 0], - type='', - swap='sd_kpt18'), - 283: - dict( - name='sd_kpt9', - id=283, - color=[128, 64, 0], - type='', - swap='sd_kpt17'), - 284: - dict( - name='sd_kpt10', - id=284, - color=[128, 64, 0], - type='', - swap='sd_kpt16'), - 285: - dict( - name='sd_kpt11', - id=285, - color=[128, 64, 0], - type='', - swap='sd_kpt15'), - 286: - dict( - name='sd_kpt12', - id=286, - color=[128, 64, 0], - type='', - swap='sd_kpt14'), - 287: - dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''), - 288: - dict( - name='sd_kpt14', - id=288, - color=[128, 64, 0], - type='', - swap='sd_kpt12'), - 289: - dict( - name='sd_kpt15', - id=289, - color=[128, 64, 0], - type='', - swap='sd_kpt11'), - 290: - dict( - name='sd_kpt16', - id=290, - color=[128, 64, 0], - type='', - swap='sd_kpt10'), - 291: - dict( - name='sd_kpt17', - id=291, - color=[128, 64, 0], - type='', - swap='sd_kpt9'), - 292: - dict( - name='sd_kpt18', - id=292, - color=[128, 64, 0], - type='', - swap='sd_kpt8'), - 293: - dict( - name='sd_kpt19', - id=293, - color=[128, 64, 0], - type='', - swap='sd_kpt7') - }), - skeleton_info=dict({ - 0: - dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]), - 1: - dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]), - 2: - dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]), - 3: - dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]), - 4: - dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]), - 5: - dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]), - 6: - dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]), - 7: - dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]), - 8: - dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]), - 9: - dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]), - 10: - dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]), - 11: - dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]), - 12: - dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]), - 13: - dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]), - 14: - dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]), - 15: - dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]), - 16: - dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]), - 17: - dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]), - 18: - dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]), - 19: - dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]), - 20: - dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]), - 21: - dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]), - 
22: - dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]), - 23: - dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]), - 24: - dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]), - 25: - dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]), - 26: - dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]), - 27: - dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]), - 28: - dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]), - 29: - dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]), - 30: - dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]), - 31: - dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]), - 32: - dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]), - 33: - dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]), - 34: - dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]), - 35: - dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]), - 36: - dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]), - 37: - dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]), - 38: - dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]), - 39: - dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]), - 40: - dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]), - 41: - dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]), - 42: - dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]), - 43: - dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]), - 44: - dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]), - 45: - dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]), - 46: - dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]), - 47: - dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]), - 48: - dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]), - 49: - dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]), - 50: - dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]), - 51: - dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]), - 52: - dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]), - 53: - dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]), - 54: - dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]), - 55: - dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]), - 56: - dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]), - 57: - dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]), - 58: - dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]), - 59: - dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]), - 60: - dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]), - 61: - dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]), - 62: - dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]), - 63: - dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]), - 64: - dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]), - 65: - dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]), - 66: - dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]), - 67: - dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]), - 68: - dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]), - 69: - dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 
255]), - 70: - dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]), - 71: - dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]), - 72: - dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]), - 73: - dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]), - 74: - dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]), - 75: - dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]), - 76: - dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]), - 77: - dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]), - 78: - dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]), - 79: - dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]), - 80: - dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]), - 81: - dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]), - 82: - dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]), - 83: - dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]), - 84: - dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]), - 85: - dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]), - 86: - dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]), - 87: - dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]), - 88: - dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]), - 89: - dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]), - 90: - dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]), - 91: - dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]), - 92: - dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]), - 93: - dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]), - 94: - dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]), - 95: - dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]), - 96: - dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]), - 97: - dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]), - 98: - dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]), - 99: - dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]), - 100: - dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]), - 101: - dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]), - 102: - dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]), - 103: - dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]), - 104: - dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]), - 105: - dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]), - 106: - dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]), - 107: - dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]), - 108: - dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]), - 109: - dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]), - 110: - dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]), - 111: - dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]), - 112: - dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]), - 113: - dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]), - 114: - dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]), - 115: - dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]), - 116: - dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]), - 117: - 
dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]), - 118: - dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]), - 119: - dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]), - 120: - dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]), - 121: - dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]), - 122: - dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]), - 123: - dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]), - 124: - dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]), - 125: - dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]), - 126: - dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]), - 127: - dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]), - 128: - dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]), - 129: - dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]), - 130: - dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]), - 131: - dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]), - 132: - dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]), - 133: - dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]), - 134: - dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]), - 135: - dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]), - 136: - dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]), - 137: - dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]), - 138: - dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]), - 139: - dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]), - 140: - dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]), - 141: - dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]), - 142: - dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]), - 143: - dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]), - 144: - dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]), - 145: - dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]), - 146: - dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]), - 147: - dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]), - 148: - dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]), - 149: - dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]), - 150: - dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]), - 151: - dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]), - 152: - dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]), - 153: - dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]), - 154: - dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]), - 155: - dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]), - 156: - dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]), - 157: - dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]), - 158: - dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]), - 159: - dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]), - 160: - dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]), - 161: - dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]), - 162: - dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]), - 
163: - dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]), - 164: - dict( - link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128, - 128]), - 165: - dict( - link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128, - 128]), - 166: - dict( - link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128, - 128]), - 167: - dict( - link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128, - 128]), - 168: - dict( - link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128, - 128]), - 169: - dict( - link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128, - 128]), - 170: - dict( - link=('shorts_kpt9', 'shorts_kpt10'), - id=170, - color=[128, 128, 128]), - 171: - dict( - link=('shorts_kpt10', 'shorts_kpt3'), - id=171, - color=[128, 128, 128]), - 172: - dict( - link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128, - 128]), - 173: - dict( - link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128, - 128]), - 174: - dict( - link=('trousers_kpt1', 'trousers_kpt4'), - id=174, - color=[128, 0, 128]), - 175: - dict( - link=('trousers_kpt4', 'trousers_kpt5'), - id=175, - color=[128, 0, 128]), - 176: - dict( - link=('trousers_kpt5', 'trousers_kpt6'), - id=176, - color=[128, 0, 128]), - 177: - dict( - link=('trousers_kpt6', 'trousers_kpt7'), - id=177, - color=[128, 0, 128]), - 178: - dict( - link=('trousers_kpt7', 'trousers_kpt8'), - id=178, - color=[128, 0, 128]), - 179: - dict( - link=('trousers_kpt8', 'trousers_kpt9'), - id=179, - color=[128, 0, 128]), - 180: - dict( - link=('trousers_kpt9', 'trousers_kpt10'), - id=180, - color=[128, 0, 128]), - 181: - dict( - link=('trousers_kpt10', 'trousers_kpt11'), - id=181, - color=[128, 0, 128]), - 182: - dict( - link=('trousers_kpt11', 'trousers_kpt12'), - id=182, - color=[128, 0, 128]), - 183: - dict( - link=('trousers_kpt12', 'trousers_kpt13'), - id=183, - color=[128, 0, 128]), - 184: - dict( - link=('trousers_kpt13', 'trousers_kpt14'), - id=184, - color=[128, 0, 128]), - 185: - dict( - link=('trousers_kpt14', 'trousers_kpt3'), - id=185, - color=[128, 0, 128]), - 186: - dict( - link=('trousers_kpt3', 'trousers_kpt2'), - id=186, - color=[128, 0, 128]), - 187: - dict( - link=('trousers_kpt2', 'trousers_kpt1'), - id=187, - color=[128, 0, 128]), - 188: - dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]), - 189: - dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]), - 190: - dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]), - 191: - dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]), - 192: - dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]), - 193: - dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]), - 194: - dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]), - 195: - dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]), - 196: - dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]), - 197: - dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]), - 198: - dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]), - 199: - dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]), - 200: - dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]), - 201: - dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]), - 202: - dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]), - 203: - dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]), - 204: - dict(link=('ssd_kpt13', 'ssd_kpt14'), 
id=204, color=[64, 64, 128]), - 205: - dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]), - 206: - dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]), - 207: - dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]), - 208: - dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]), - 209: - dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]), - 210: - dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]), - 211: - dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]), - 212: - dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]), - 213: - dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]), - 214: - dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]), - 215: - dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]), - 216: - dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]), - 217: - dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]), - 218: - dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]), - 219: - dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]), - 220: - dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]), - 221: - dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]), - 222: - dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]), - 223: - dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]), - 224: - dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]), - 225: - dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]), - 226: - dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]), - 227: - dict(link=('lsd_kpt2', 'lsd_kpt7'), id=227, color=[128, 64, 0]), - 228: - dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]), - 229: - dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]), - 230: - dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]), - 231: - dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]), - 232: - dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]), - 233: - dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]), - 234: - dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]), - 235: - dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]), - 236: - dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]), - 237: - dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]), - 238: - dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]), - 239: - dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]), - 240: - dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]), - 241: - dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]), - 242: - dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]), - 243: - dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]), - 244: - dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]), - 245: - dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]), - 246: - dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]), - 247: - dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]), - 248: - dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]), - 249: - dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]), - 250: - dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]), - 251: -
dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]), - 252: - dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]), - 253: - dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]), - 254: - dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]), - 255: - dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]), - 256: - dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]), - 257: - dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]), - 258: - dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]), - 259: - dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]), - 260: - dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]), - 261: - dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]), - 262: - dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]), - 263: - dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]), - 264: - dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]), - 265: - dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]), - 266: - dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]), - 267: - dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]), - 268: - dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]), - 269: - dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]), - 270: - dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]), - 271: - dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]), - 272: - dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]), - 273: - dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]), - 274: - dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]), - 275: - dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]), - 276: - dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]), - 277: - dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]), - 278: - dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]), - 279: - dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]), - 280: - dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]), - 281: - dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]), - 282: - dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]), - 283: - dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]), - 284: - dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]), - 285: - dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]), - 286: - dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]), - 287: - dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]), - 288: - dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]), - 289: - dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]), - 290: - dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]), - 291: - dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]), - 292: - dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]), - 293: - dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]), - 294: - dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]), - 295: - dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]), - 296: - dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]), - 297: - dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]), - 298: - dict(link=('sd_kpt5', 'sd_kpt4'), id=298, 
color=[128, 64, 0]), - 299: - dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]), - 300: - dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]), - 301: - dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]), - 302: - dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]), - 303: - dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0]) - }), - joint_weights=[ - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 - ], - sigmas=[]) -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False), - dict( - type='MultiStepLR', - begin=0, - end=120, - milestones=[80, 100], - gamma=0.1, - by_epoch=True) -] -optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) -auto_scale_lr = dict(base_batch_size=512) -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfashion2/' -codec = dict( - type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2) -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=(192, 256)), - dict( - type='GenerateTarget', - encoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2)), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=dict(backend='local')), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict(type='PackPoseInputs') -] -train_dataloader = dict( - batch_size=64, - num_workers=6, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='DeepFashion2Dataset', - data_root='data/deepfashion2/', - data_mode='topdown', - ann_file='train/deepfashion2_sling.json', - data_prefix=dict(img='train/image/'), - pipeline=[ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( 
- type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=(192, 256)), - dict( - type='GenerateTarget', - encoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2)), - dict(type='PackPoseInputs') - ])) -val_dataloader = dict( - batch_size=32, - num_workers=6, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type='DeepFashion2Dataset', - data_root='data/deepfashion2/', - data_mode='topdown', - ann_file='validation/deepfashion2_sling.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=[ - dict(type='LoadImage', backend_args=dict(backend='local')), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict(type='PackPoseInputs') - ])) -test_dataloader = dict( - batch_size=32, - num_workers=6, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type='DeepFashion2Dataset', - data_root='data/deepfashion2/', - data_mode='topdown', - ann_file='validation/deepfashion2_sling.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=[ - dict(type='LoadImage', backend_args=dict(backend='local')), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict(type='PackPoseInputs') - ])) -channel_cfg = dict( - num_output_channels=294, - dataset_joints=294, - dataset_channel=[[ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, - 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, - 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, - 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, - 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, - 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, - 290, 291, 292, 293 - ]], - inference_channel=[ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, - 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 
133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, - 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, - 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, - 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, - 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, - 290, 291, 292, 293 - ]) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2)), - test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True)) -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -launcher = 'pytorch' -work_dir = './work_dirs/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192' diff --git a/spaces/AUBADA-ALARABI/poetry2023/app.py b/spaces/AUBADA-ALARABI/poetry2023/app.py deleted file mode 100644 index 5b6654d5a405778ddbc9ca5fa5d041aff535f3b5..0000000000000000000000000000000000000000 --- a/spaces/AUBADA-ALARABI/poetry2023/app.py +++ /dev/null @@ -1,53 +0,0 @@ -import gc -import gradio as gr -from transformers import pipeline, set_seed - -pipe = pipeline('text-generation', framework='pt', model='akhooli/ap2023', tokenizer='akhooli/ap2023') -#gc.collect() -samples = [['أنت' - ,1.0, 50, 1.0, 1.0, 114],['هل غادر' - ,1.0, 50, 1.0, 1.0, 114 ],['ألا ليت' - ,1.0, 50, 1.0, 1.0, 114 ],['يا قدس' - ,1.0, 50, 1.0, 1.0, 114],['عيد بأية حال' - ,1.0, 50, 1.0, 1.0, 114],['لكل شيء إذا ما' - ,1.0, 50, 1.0, 1.0, 114 ],['.' - ,1.0, 50, 1.0, 1.0, 114]] - -notes = """ -- Enter a short prompt or select (click) one of the examples and click SEND -- Adjust parameters (temperature, top k, top p and penalty) through the slider (keep close to default values). -- For the same seed (randomness), the same output is regenerated if other parameters are fixed -- Clear and enter a new prompt or select another example and SEND to regenerate -- The '.' means start a new line from no prompt (your prompt need not be long) -- Be patient: this runs on CPU (free tier) -- Feedback (Twitter): @akhooli (https://twitter.com/akhooli/status/1611025232201977859) -- Note/Disclaimer: may generate unaccepted or inappropriate content. Use at your own risk.
-""" -def sayPoetry(prompt, temp=1.0, topk = 50, topp = 1.0, penalty=1.0, seed=114): - if not int(seed) >= 0: seed=114 - set_seed(seed) - gen = pipe(prompt, max_length=96, do_sample=True, temperature=temp, top_k=topk, top_p=topp, repetition_penalty=penalty, - min_length = 64, no_repeat_ngram_size = 3, return_full_text=True, - num_beams=5, num_return_sequences=1)[0]["generated_text"] - poetry ="" - for line in gen.split('.')[:-1]: - poetry += line #+ "\n" - return poetry -poetry = gr.Interface(fn=sayPoetry, - inputs=[ - gr.Textbox(label="Enter short prompt or select from examples:"), - gr.Slider(0.70, 1.2, step=0.01,value=1.0, label='control temperature'), - gr.Slider(25, 100, step=1,value=50, label='control top k'), - gr.Slider(0.80, 1.0, step=0.01,value=1.0, label='control top p'), - gr.Slider(0.90, 1.50, step=0.01,value=1.0, label='control penalty'), - gr.Number(value=139750, precision=0, label='Seed'), - ], - outputs=[gr.Textbox(label="Generated Poetry:")], - - allow_flagging='never', - title='Arabic Poetry Generation Demo (updated Jan. 2023)', - description = "A simple demo of AI generated poetry based on 1M poems fine-tuned using AraGPT2 (be patient, runs on cpu)", - examples=samples, - cache_examples=False, - article = notes) -poetry.launch() # show_error = True, debug=True \ No newline at end of file diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/requests.py b/spaces/AchyuthGamer/OpenGPT/g4f/requests.py deleted file mode 100644 index f238062e3e3832d79a5e12b1d3f96c11d708cb0e..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/requests.py +++ /dev/null @@ -1,181 +0,0 @@ -from __future__ import annotations - -import warnings -import json -import asyncio -from functools import partialmethod -from asyncio import Future, Queue -from typing import AsyncGenerator, Union, Optional - -from curl_cffi.requests import AsyncSession, Response -import curl_cffi - -is_newer_0_5_8: bool = hasattr(AsyncSession, "_set_cookies") or hasattr(curl_cffi.requests.Cookies, "get_cookies_for_curl") -is_newer_0_5_9: bool = hasattr(curl_cffi.AsyncCurl, "remove_handle") -is_newer_0_5_10: bool = hasattr(AsyncSession, "release_curl") - - -class StreamResponse: - def __init__(self, inner: Response, queue: Queue[bytes]) -> None: - self.inner: Response = inner - self.queue: Queue[bytes] = queue - self.request = inner.request - self.status_code: int = inner.status_code - self.reason: str = inner.reason - self.ok: bool = inner.ok - self.headers = inner.headers - self.cookies = inner.cookies - - async def text(self) -> str: - content: bytes = await self.read() - return content.decode() - - def raise_for_status(self) -> None: - if not self.ok: - raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}") - - async def json(self, **kwargs) -> dict: - return json.loads(await self.read(), **kwargs) - - async def iter_lines( - self, chunk_size: Optional[int] = None, decode_unicode: bool = False, delimiter: Optional[str] = None - ) -> AsyncGenerator[bytes, None]: - """ - Copied from: https://requests.readthedocs.io/en/latest/_modules/requests/models/ - which is under the License: Apache 2.0 - """ - - pending: bytes = None - - async for chunk in self.iter_content( - chunk_size=chunk_size, decode_unicode=decode_unicode - ): - if pending is not None: - chunk = pending + chunk - if delimiter: - lines = chunk.split(delimiter) - else: - lines = chunk.splitlines() - if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: - pending = lines.pop() - else: - pending = None - - for line in lines: 
- yield line - - if pending is not None: - yield pending - - async def iter_content( - self, chunk_size: Optional[int] = None, decode_unicode: bool = False - ) -> AsyncGenerator[bytes, None]: - if chunk_size: - warnings.warn("chunk_size is ignored, there is no way to tell curl that.") - if decode_unicode: - raise NotImplementedError() - while True: - chunk = await self.queue.get() - if chunk is None: - return - yield chunk - - async def read(self) -> bytes: - return b"".join([chunk async for chunk in self.iter_content()]) - - -class StreamRequest: - def __init__(self, session: AsyncSession, method: str, url: str, **kwargs: Union[bool, int, str]) -> None: - self.session: AsyncSession = session - self.loop: asyncio.AbstractEventLoop = session.loop if session.loop else asyncio.get_running_loop() - self.queue: Queue[bytes] = Queue() - self.method: str = method - self.url: str = url - self.options: dict = kwargs - self.handle: Optional[curl_cffi.AsyncCurl] = None - - def _on_content(self, data: bytes) -> None: - if not self.enter.done(): - self.enter.set_result(None) - self.queue.put_nowait(data) - - def _on_done(self, task: Future) -> None: - if not self.enter.done(): - self.enter.set_result(None) - self.queue.put_nowait(None) - - self.loop.call_soon(self.release_curl) - - async def fetch(self) -> StreamResponse: - if self.handle: - raise RuntimeError("Request already started") - self.curl: curl_cffi.AsyncCurl = await self.session.pop_curl() - self.enter: asyncio.Future = self.loop.create_future() - if is_newer_0_5_10: - request, _, header_buffer, _, _ = self.session._set_curl_options( - self.curl, - self.method, - self.url, - content_callback=self._on_content, - **self.options - ) - else: - request, _, header_buffer = self.session._set_curl_options( - self.curl, - self.method, - self.url, - content_callback=self._on_content, - **self.options - ) - if is_newer_0_5_9: - self.handle = self.session.acurl.add_handle(self.curl) - else: - await self.session.acurl.add_handle(self.curl, False) - self.handle = self.session.acurl._curl2future[self.curl] - self.handle.add_done_callback(self._on_done) - # Wait for headers - await self.enter - # Raise exceptions - if self.handle.done(): - self.handle.result() - if is_newer_0_5_8: - response = self.session._parse_response(self.curl, _, header_buffer) - response.request = request - else: - response = self.session._parse_response(self.curl, request, _, header_buffer) - return StreamResponse(response, self.queue) - - async def __aenter__(self) -> StreamResponse: - return await self.fetch() - - async def __aexit__(self, *args) -> None: - self.release_curl() - - def release_curl(self) -> None: - if is_newer_0_5_10: - self.session.release_curl(self.curl) - return - if not self.curl: - return - self.curl.clean_after_perform() - if is_newer_0_5_9: - self.session.acurl.remove_handle(self.curl) - elif not self.handle.done() and not self.handle.cancelled(): - self.session.acurl.set_result(self.curl) - self.curl.reset() - self.session.push_curl(self.curl) - self.curl = None - - -class StreamSession(AsyncSession): - def request( - self, method: str, url: str, **kwargs - ) -> StreamRequest: - return StreamRequest(self, method, url, **kwargs) - - head = partialmethod(request, "HEAD") - get = partialmethod(request, "GET") - post = partialmethod(request, "POST") - put = partialmethod(request, "PUT") - patch = partialmethod(request, "PATCH") - delete = partialmethod(request, "DELETE") \ No newline at end of file diff --git 
a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/arcadetcrp-plugin.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/arcadetcrp-plugin.js deleted file mode 100644 index 31aeaa4243d034c5b752da58df010824851137c7..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/arcadetcrp-plugin.js +++ /dev/null @@ -1,39 +0,0 @@ -import TCRP from './arcadetcrp.js'; - -const Recorder = TCRP.Recorder; -const Player = TCRP.Player; -const StepRunner = TCRP.StepRunner; - -class ArcadeTCRPPlugin extends Phaser.Plugins.BasePlugin { - constructor(pluginManager) { - super(pluginManager); - } - - start() { - var eventEmitter = this.game.events; - eventEmitter.on('destroy', this.destroy, this); - } - - addRecorder(parent, config) { - return new Recorder(parent, config); - } - - addPlayer(parent, config) { - return new Player(parent, config); - } - - addStepRunner(parent) { - return new StepRunner(parent); - } -} - -var methods = { - runCommands: TCRP.RunCommands -} - -Object.assign( - ArcadeTCRPPlugin.prototype, - methods -); - -export default ArcadeTCRPPlugin; \ No newline at end of file diff --git a/spaces/Akmyradov/dost.ai/README.md b/spaces/Akmyradov/dost.ai/README.md deleted file mode 100644 index 259cc9bbd95d13853371019621f6e34810c787ba..0000000000000000000000000000000000000000 --- a/spaces/Akmyradov/dost.ai/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Dost.ai -emoji: 🦀 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Alashazam/StoryGenerator/README.md b/spaces/Alashazam/StoryGenerator/README.md deleted file mode 100644 index 069ed0b2085343fddad97c599cecac650adad942..0000000000000000000000000000000000000000 --- a/spaces/Alashazam/StoryGenerator/README.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: GPT 2 Story Gen -emoji: 🧙🏻‍♂️ -colorFrom: purple -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false -duplicated_from: merve/GPT-2-story-gen ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
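A note on the g4f `requests.py` module whose deletion appears earlier in this diff: its `StreamSession` subclasses curl_cffi's `AsyncSession` so that `get`, `post`, and the other verbs return a `StreamRequest`, and entering that request as an async context manager performs the transfer and yields a `StreamResponse` as soon as response headers arrive. The sketch below shows how it might be driven; it assumes a curl_cffi release compatible with the version shims in that file, that the module is importable as `g4f.requests`, and that the URL and `impersonate` target are illustrative placeholders, not values from the repository.

import asyncio

from g4f.requests import StreamSession  # module path assumed from the diff above

async def main() -> None:
    # get() builds a StreamRequest instead of performing the request
    # immediately; entering the context starts the transfer.
    session = StreamSession(impersonate="chrome110")
    async with session.get("https://example.com/stream") as response:
        response.raise_for_status()
        # iter_lines() yields raw bytes line by line as chunks arrive,
        # without buffering the whole body in memory.
        async for line in response.iter_lines():
            print(line.decode())

asyncio.run(main())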
diff --git a/spaces/Alpaca233/SadTalker/src/utils/init_path.py b/spaces/Alpaca233/SadTalker/src/utils/init_path.py deleted file mode 100644 index 5f38d11907bd0dc789992062ce7f02d8876c638f..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/SadTalker/src/utils/init_path.py +++ /dev/null @@ -1,47 +0,0 @@ -import os -import glob - -def init_path(checkpoint_dir, config_dir, size=512, old_version=False, preprocess='crop'): - - if old_version: - #### load all the checkpoints in `pth` format - sadtalker_paths = { - 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'), - 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'), - 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'), - 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'), - 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth') - } - - use_safetensor = False - elif len(glob.glob(os.path.join(checkpoint_dir, '*.safetensors'))): - print('using safetensor as default') - sadtalker_paths = { - "checkpoint":os.path.join(checkpoint_dir, 'SadTalker_V0.0.2_'+str(size)+'.safetensors'), - } - use_safetensor = True - else: - print("WARNING: The new version of the model is distributed as safetensors, so you may need to download it manually. Running the old version of the checkpoint this time!") - use_safetensor = False - - sadtalker_paths = { - 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'), - 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'), - 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'), - 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'), - 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth') - } - - sadtalker_paths['dir_of_BFM_fitting'] = os.path.join(config_dir) # , 'BFM_Fitting' - sadtalker_paths['audio2pose_yaml_path'] = os.path.join(config_dir, 'auido2pose.yaml') - sadtalker_paths['audio2exp_yaml_path'] = os.path.join(config_dir, 'auido2exp.yaml') - sadtalker_paths['use_safetensor'] = use_safetensor - - if 'full' in preprocess: - sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00109-model.pth.tar') - sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender_still.yaml') - else: - sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00229-model.pth.tar') - sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender.yaml') - - return sadtalker_paths \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_reference.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_reference.py deleted file mode 100644 index 364d5d80d721c1483e5b123a3dc92244af88715a..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_reference.py +++ /dev/null @@ -1,796 +0,0 @@ -# Inspired by: https://github.com/Mikubill/sd-webui-controlnet/discussions/1236 and https://github.com/Mikubill/sd-webui-controlnet/discussions/1280 -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -import numpy as np -import PIL.Image -import torch - -from diffusers import StableDiffusionPipeline -from diffusers.models.attention import BasicTransformerBlock -from
diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UpBlock2D -from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput -from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import rescale_noise_cfg -from diffusers.utils import PIL_INTERPOLATION, logging, randn_tensor - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> import torch - >>> from diffusers import UniPCMultistepScheduler - >>> from diffusers.utils import load_image - - >>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png") - - >>> pipe = StableDiffusionReferencePipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", - safety_checker=None, - torch_dtype=torch.float16 - ).to('cuda:0') - - >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) - - >>> result_img = pipe(ref_image=input_image, - prompt="1girl", - num_inference_steps=20, - reference_attn=True, - reference_adain=True).images[0] - - >>> result_img.show() - ``` -""" - - -def torch_dfs(model: torch.nn.Module): - result = [model] - for child in model.children(): - result += torch_dfs(child) - return result - - -class StableDiffusionReferencePipeline(StableDiffusionPipeline): - def _default_height_width(self, height, width, image): - # NOTE: It is possible that a list of images have different - # dimensions for each image, so just checking the first image - # is not _exactly_ correct, but it is simple. - while isinstance(image, list): - image = image[0] - - if height is None: - if isinstance(image, PIL.Image.Image): - height = image.height - elif isinstance(image, torch.Tensor): - height = image.shape[2] - - height = (height // 8) * 8 # round down to nearest multiple of 8 - - if width is None: - if isinstance(image, PIL.Image.Image): - width = image.width - elif isinstance(image, torch.Tensor): - width = image.shape[3] - - width = (width // 8) * 8 # round down to nearest multiple of 8 - - return height, width - - def prepare_image( - self, - image, - width, - height, - batch_size, - num_images_per_prompt, - device, - dtype, - do_classifier_free_guidance=False, - guess_mode=False, - ): - if not isinstance(image, torch.Tensor): - if isinstance(image, PIL.Image.Image): - image = [image] - - if isinstance(image[0], PIL.Image.Image): - images = [] - - for image_ in image: - image_ = image_.convert("RGB") - image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]) - image_ = np.array(image_) - image_ = image_[None, :] - images.append(image_) - - image = images - - image = np.concatenate(image, axis=0) - image = np.array(image).astype(np.float32) / 255.0 - image = (image - 0.5) / 0.5 - image = image.transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - elif isinstance(image[0], torch.Tensor): - image = torch.cat(image, dim=0) - - image_batch_size = image.shape[0] - - if image_batch_size == 1: - repeat_by = batch_size - else: - # image batch size is the same as prompt batch size - repeat_by = num_images_per_prompt - - image = image.repeat_interleave(repeat_by, dim=0) - - image = image.to(device=device, dtype=dtype) - - if do_classifier_free_guidance and not guess_mode: - image = torch.cat([image] * 2) - - return image - - def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance): - refimage = refimage.to(device=device, dtype=dtype) - -
# encode the reference image into the latent space so we can concatenate it to the latents
-        if isinstance(generator, list):
-            ref_image_latents = [
-                self.vae.encode(refimage[i : i + 1]).latent_dist.sample(generator=generator[i])
-                for i in range(batch_size)
-            ]
-            ref_image_latents = torch.cat(ref_image_latents, dim=0)
-        else:
-            ref_image_latents = self.vae.encode(refimage).latent_dist.sample(generator=generator)
-        ref_image_latents = self.vae.config.scaling_factor * ref_image_latents
-
-        # duplicate ref_image_latents for each generation per prompt, using mps friendly method
-        if ref_image_latents.shape[0] < batch_size:
-            if not batch_size % ref_image_latents.shape[0] == 0:
-                raise ValueError(
-                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
-                    f" to a total batch size of {batch_size}, but {ref_image_latents.shape[0]} images were passed."
-                    " Make sure the number of images that you pass is divisible by the total requested batch size."
-                )
-            ref_image_latents = ref_image_latents.repeat(batch_size // ref_image_latents.shape[0], 1, 1, 1)
-
-        ref_image_latents = torch.cat([ref_image_latents] * 2) if do_classifier_free_guidance else ref_image_latents
-
-        # aligning device to prevent device errors when concatenating it with the latent model input
-        ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)
-        return ref_image_latents
-
-    @torch.no_grad()
-    def __call__(
-        self,
-        prompt: Union[str, List[str]] = None,
-        ref_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
-        height: Optional[int] = None,
-        width: Optional[int] = None,
-        num_inference_steps: int = 50,
-        guidance_scale: float = 7.5,
-        negative_prompt: Optional[Union[str, List[str]]] = None,
-        num_images_per_prompt: Optional[int] = 1,
-        eta: float = 0.0,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        latents: Optional[torch.FloatTensor] = None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
-        return_dict: bool = True,
-        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-        callback_steps: int = 1,
-        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-        guidance_rescale: float = 0.0,
-        attention_auto_machine_weight: float = 1.0,
-        gn_auto_machine_weight: float = 1.0,
-        style_fidelity: float = 0.5,
-        reference_attn: bool = True,
-        reference_adain: bool = True,
-    ):
-        r"""
-        Function invoked when calling the pipeline for generation.
-
-        Args:
-            prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
-                instead.
-            ref_image (`torch.FloatTensor`, `PIL.Image.Image`):
-                The Reference Control input condition. Reference Control uses this input condition to generate guidance
-                to the UNet. If the type is specified as `torch.FloatTensor`, it is passed to Reference Control as is.
-                `PIL.Image.Image` can also be accepted as an image.
-            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
-                The height in pixels of the generated image.
-            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
-                The width in pixels of the generated image.
-            num_inference_steps (`int`, *optional*, defaults to 50):
-                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                expense of slower inference.
-            guidance_scale (`float`, *optional*, defaults to 7.5):
-                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
-                usually at the expense of lower image quality.
-            negative_prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
-                less than `1`).
-            num_images_per_prompt (`int`, *optional*, defaults to 1):
-                The number of images to generate per prompt.
-            eta (`float`, *optional*, defaults to 0.0):
-                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
-                [`schedulers.DDIMScheduler`] and will be ignored for others.
-            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
-                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
-                to make generation deterministic.
-            latents (`torch.FloatTensor`, *optional*):
-                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
-                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will be generated by sampling using the supplied random `generator`.
-            prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
-                provided, text embeddings will be generated from the `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
-                weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input
-                argument.
-            output_type (`str`, *optional*, defaults to `"pil"`):
-                The output format of the generated image. Choose between
-                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-            return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
-                plain tuple.
-            callback (`Callable`, *optional*):
-                A function that will be called every `callback_steps` steps during inference. The function will be
-                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-            callback_steps (`int`, *optional*, defaults to 1):
-                The frequency at which the `callback` function will be called. If not specified, the callback will be
-                called at every step.
-            cross_attention_kwargs (`dict`, *optional*):
-                A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
-                `self.processor` in
-                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
-            guidance_rescale (`float`, *optional*, defaults to 0.0):
-                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
-                Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16.
of that paper.
-                The guidance rescale factor should fix overexposure when using zero terminal SNR.
-            attention_auto_machine_weight (`float`):
-                Weight threshold for using the reference query as the self-attention context. If
-                `attention_auto_machine_weight=1.0`, the reference query is used as context for all self-attention
-                layers.
-            gn_auto_machine_weight (`float`):
-                Weight threshold for applying reference AdaIN. If `gn_auto_machine_weight=2.0`, all reference AdaIN
-                plugins are used.
-            style_fidelity (`float`):
-                Style fidelity of `ref_uncond_xt`. If `style_fidelity=1.0`, the reference control is prioritized; if
-                `style_fidelity=0.0`, the prompt is prioritized; values in between balance the two.
-            reference_attn (`bool`):
-                Whether to use the reference query as the self-attention context.
-            reference_adain (`bool`):
-                Whether to use reference AdaIN.
-
-        Examples:
-
-        Returns:
-            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
-            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
-            element is a list of `bool`s denoting whether the corresponding generated image likely represents
-            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
-        """
-        assert reference_attn or reference_adain, "`reference_attn` or `reference_adain` must be True."
-
-        # 0. Default height and width to unet
-        height, width = self._default_height_width(height, width, ref_image)
-
-        # 1. Check inputs. Raise error if not correct
-        self.check_inputs(
-            prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
-        )
-
-        # 2. Define call parameters
-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
-            batch_size = len(prompt)
-        else:
-            batch_size = prompt_embeds.shape[0]
-
-        device = self._execution_device
-        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
-        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-        # corresponds to doing no classifier free guidance.
-        do_classifier_free_guidance = guidance_scale > 1.0
-
-        # 3. Encode input prompt
-        text_encoder_lora_scale = (
-            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
-        )
-        prompt_embeds = self._encode_prompt(
-            prompt,
-            device,
-            num_images_per_prompt,
-            do_classifier_free_guidance,
-            negative_prompt,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            lora_scale=text_encoder_lora_scale,
-        )
-
-        # 4. Preprocess reference image
-        ref_image = self.prepare_image(
-            image=ref_image,
-            width=width,
-            height=height,
-            batch_size=batch_size * num_images_per_prompt,
-            num_images_per_prompt=num_images_per_prompt,
-            device=device,
-            dtype=prompt_embeds.dtype,
-        )
-
-        # 5. Prepare timesteps
-        self.scheduler.set_timesteps(num_inference_steps, device=device)
-        timesteps = self.scheduler.timesteps
-
-        # 6. Prepare latent variables
-        num_channels_latents = self.unet.config.in_channels
-        latents = self.prepare_latents(
-            batch_size * num_images_per_prompt,
-            num_channels_latents,
-            height,
-            width,
-            prompt_embeds.dtype,
-            device,
-            generator,
-            latents,
-        )
-
-        # 7.
Prepare reference latent variables - ref_image_latents = self.prepare_ref_latents( - ref_image, - batch_size * num_images_per_prompt, - prompt_embeds.dtype, - device, - generator, - do_classifier_free_guidance, - ) - - # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 9. Modify self attention and group norm - MODE = "write" - uc_mask = ( - torch.Tensor([1] * batch_size * num_images_per_prompt + [0] * batch_size * num_images_per_prompt) - .type_as(ref_image_latents) - .bool() - ) - - def hacked_basic_transformer_inner_forward( - self, - hidden_states: torch.FloatTensor, - attention_mask: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - timestep: Optional[torch.LongTensor] = None, - cross_attention_kwargs: Dict[str, Any] = None, - class_labels: Optional[torch.LongTensor] = None, - ): - if self.use_ada_layer_norm: - norm_hidden_states = self.norm1(hidden_states, timestep) - elif self.use_ada_layer_norm_zero: - norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1( - hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype - ) - else: - norm_hidden_states = self.norm1(hidden_states) - - # 1. Self-Attention - cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} - if self.only_cross_attention: - attn_output = self.attn1( - norm_hidden_states, - encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, - attention_mask=attention_mask, - **cross_attention_kwargs, - ) - else: - if MODE == "write": - self.bank.append(norm_hidden_states.detach().clone()) - attn_output = self.attn1( - norm_hidden_states, - encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, - attention_mask=attention_mask, - **cross_attention_kwargs, - ) - if MODE == "read": - if attention_auto_machine_weight > self.attn_weight: - attn_output_uc = self.attn1( - norm_hidden_states, - encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1), - # attention_mask=attention_mask, - **cross_attention_kwargs, - ) - attn_output_c = attn_output_uc.clone() - if do_classifier_free_guidance and style_fidelity > 0: - attn_output_c[uc_mask] = self.attn1( - norm_hidden_states[uc_mask], - encoder_hidden_states=norm_hidden_states[uc_mask], - **cross_attention_kwargs, - ) - attn_output = style_fidelity * attn_output_c + (1.0 - style_fidelity) * attn_output_uc - self.bank.clear() - else: - attn_output = self.attn1( - norm_hidden_states, - encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, - attention_mask=attention_mask, - **cross_attention_kwargs, - ) - if self.use_ada_layer_norm_zero: - attn_output = gate_msa.unsqueeze(1) * attn_output - hidden_states = attn_output + hidden_states - - if self.attn2 is not None: - norm_hidden_states = ( - self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) - ) - - # 2. Cross-Attention - attn_output = self.attn2( - norm_hidden_states, - encoder_hidden_states=encoder_hidden_states, - attention_mask=encoder_attention_mask, - **cross_attention_kwargs, - ) - hidden_states = attn_output + hidden_states - - # 3. 
Feed-forward - norm_hidden_states = self.norm3(hidden_states) - - if self.use_ada_layer_norm_zero: - norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] - - ff_output = self.ff(norm_hidden_states) - - if self.use_ada_layer_norm_zero: - ff_output = gate_mlp.unsqueeze(1) * ff_output - - hidden_states = ff_output + hidden_states - - return hidden_states - - def hacked_mid_forward(self, *args, **kwargs): - eps = 1e-6 - x = self.original_forward(*args, **kwargs) - if MODE == "write": - if gn_auto_machine_weight >= self.gn_weight: - var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0) - self.mean_bank.append(mean) - self.var_bank.append(var) - if MODE == "read": - if len(self.mean_bank) > 0 and len(self.var_bank) > 0: - var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0) - std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5 - mean_acc = sum(self.mean_bank) / float(len(self.mean_bank)) - var_acc = sum(self.var_bank) / float(len(self.var_bank)) - std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5 - x_uc = (((x - mean) / std) * std_acc) + mean_acc - x_c = x_uc.clone() - if do_classifier_free_guidance and style_fidelity > 0: - x_c[uc_mask] = x[uc_mask] - x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc - self.mean_bank = [] - self.var_bank = [] - return x - - def hack_CrossAttnDownBlock2D_forward( - self, - hidden_states: torch.FloatTensor, - temb: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ): - eps = 1e-6 - - # TODO(Patrick, William) - attention mask is not used - output_states = () - - for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)): - hidden_states = resnet(hidden_states, temb) - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - cross_attention_kwargs=cross_attention_kwargs, - attention_mask=attention_mask, - encoder_attention_mask=encoder_attention_mask, - return_dict=False, - )[0] - if MODE == "write": - if gn_auto_machine_weight >= self.gn_weight: - var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) - self.mean_bank.append([mean]) - self.var_bank.append([var]) - if MODE == "read": - if len(self.mean_bank) > 0 and len(self.var_bank) > 0: - var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) - std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5 - mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i])) - var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i])) - std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5 - hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc - hidden_states_c = hidden_states_uc.clone() - if do_classifier_free_guidance and style_fidelity > 0: - hidden_states_c[uc_mask] = hidden_states[uc_mask] - hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc - - output_states = output_states + (hidden_states,) - - if MODE == "read": - self.mean_bank = [] - self.var_bank = [] - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states = output_states + (hidden_states,) - - return hidden_states, output_states - - def 
hacked_DownBlock2D_forward(self, hidden_states, temb=None): - eps = 1e-6 - - output_states = () - - for i, resnet in enumerate(self.resnets): - hidden_states = resnet(hidden_states, temb) - - if MODE == "write": - if gn_auto_machine_weight >= self.gn_weight: - var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) - self.mean_bank.append([mean]) - self.var_bank.append([var]) - if MODE == "read": - if len(self.mean_bank) > 0 and len(self.var_bank) > 0: - var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) - std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5 - mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i])) - var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i])) - std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5 - hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc - hidden_states_c = hidden_states_uc.clone() - if do_classifier_free_guidance and style_fidelity > 0: - hidden_states_c[uc_mask] = hidden_states[uc_mask] - hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc - - output_states = output_states + (hidden_states,) - - if MODE == "read": - self.mean_bank = [] - self.var_bank = [] - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states = output_states + (hidden_states,) - - return hidden_states, output_states - - def hacked_CrossAttnUpBlock2D_forward( - self, - hidden_states: torch.FloatTensor, - res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], - temb: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - upsample_size: Optional[int] = None, - attention_mask: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ): - eps = 1e-6 - # TODO(Patrick, William) - attention mask is not used - for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)): - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - hidden_states = resnet(hidden_states, temb) - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - cross_attention_kwargs=cross_attention_kwargs, - attention_mask=attention_mask, - encoder_attention_mask=encoder_attention_mask, - return_dict=False, - )[0] - - if MODE == "write": - if gn_auto_machine_weight >= self.gn_weight: - var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) - self.mean_bank.append([mean]) - self.var_bank.append([var]) - if MODE == "read": - if len(self.mean_bank) > 0 and len(self.var_bank) > 0: - var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) - std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5 - mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i])) - var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i])) - std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5 - hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc - hidden_states_c = hidden_states_uc.clone() - if do_classifier_free_guidance and style_fidelity > 0: - hidden_states_c[uc_mask] = hidden_states[uc_mask] - hidden_states = style_fidelity * hidden_states_c + (1.0 - 
style_fidelity) * hidden_states_uc - - if MODE == "read": - self.mean_bank = [] - self.var_bank = [] - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states, upsample_size) - - return hidden_states - - def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None): - eps = 1e-6 - for i, resnet in enumerate(self.resnets): - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - hidden_states = resnet(hidden_states, temb) - - if MODE == "write": - if gn_auto_machine_weight >= self.gn_weight: - var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) - self.mean_bank.append([mean]) - self.var_bank.append([var]) - if MODE == "read": - if len(self.mean_bank) > 0 and len(self.var_bank) > 0: - var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) - std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5 - mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i])) - var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i])) - std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5 - hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc - hidden_states_c = hidden_states_uc.clone() - if do_classifier_free_guidance and style_fidelity > 0: - hidden_states_c[uc_mask] = hidden_states[uc_mask] - hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc - - if MODE == "read": - self.mean_bank = [] - self.var_bank = [] - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states, upsample_size) - - return hidden_states - - if reference_attn: - attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock)] - attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) - - for i, module in enumerate(attn_modules): - module._original_inner_forward = module.forward - module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock) - module.bank = [] - module.attn_weight = float(i) / float(len(attn_modules)) - - if reference_adain: - gn_modules = [self.unet.mid_block] - self.unet.mid_block.gn_weight = 0 - - down_blocks = self.unet.down_blocks - for w, module in enumerate(down_blocks): - module.gn_weight = 1.0 - float(w) / float(len(down_blocks)) - gn_modules.append(module) - - up_blocks = self.unet.up_blocks - for w, module in enumerate(up_blocks): - module.gn_weight = float(w) / float(len(up_blocks)) - gn_modules.append(module) - - for i, module in enumerate(gn_modules): - if getattr(module, "original_forward", None) is None: - module.original_forward = module.forward - if i == 0: - # mid_block - module.forward = hacked_mid_forward.__get__(module, torch.nn.Module) - elif isinstance(module, CrossAttnDownBlock2D): - module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D) - elif isinstance(module, DownBlock2D): - module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D) - elif isinstance(module, CrossAttnUpBlock2D): - module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D) - elif isinstance(module, UpBlock2D): - module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D) - module.mean_bank = [] - 
module.var_bank = [] - module.gn_weight *= 2 - - # 10. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # ref only part - noise = randn_tensor( - ref_image_latents.shape, generator=generator, device=device, dtype=ref_image_latents.dtype - ) - ref_xt = self.scheduler.add_noise( - ref_image_latents, - noise, - t.reshape( - 1, - ), - ) - ref_xt = self.scheduler.scale_model_input(ref_xt, t) - - MODE = "write" - self.unet( - ref_xt, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - return_dict=False, - ) - - # predict the noise residual - MODE = "read" - noise_pred = self.unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - return_dict=False, - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - if do_classifier_free_guidance and guidance_rescale > 0.0: - # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf - noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - if not output_type == "latent": - image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] - image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) - else: - image = latents - has_nsfw_concept = None - - if has_nsfw_concept is None: - do_denormalize = [True] * image.shape[0] - else: - do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] - - image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) - - # Offload last model to CPU - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.final_offload_hook.offload() - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README.md deleted file mode 100644 index e450f726e9207a79c1b89f1d3d452d87bc197c5d..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README.md +++ /dev/null @@ -1,743 +0,0 @@ -# DreamBooth training example - -[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like stable diffusion given just a few(3~5) images of a subject. -The `train_dreambooth.py` script shows how to implement the training procedure and adapt it for stable diffusion. 
- - -## Running locally with PyTorch - -### Installing the dependencies - -Before running the scripts, make sure to install the library's training dependencies: - -**Important** - -To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: -```bash -git clone https://github.com/huggingface/diffusers -cd diffusers -pip install -e . -``` - -Then cd in the example folder and run -```bash -pip install -r requirements.txt -``` - -And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: - -```bash -accelerate config -``` - -Or for a default accelerate configuration without answering questions about your environment - -```bash -accelerate config default -``` - -Or if your environment doesn't support an interactive shell e.g. a notebook - -```python -from accelerate.utils import write_basic_config -write_basic_config() -``` - -When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups. - -### Dog toy example - -Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example. - -Let's first download it locally: - -```python -from huggingface_hub import snapshot_download - -local_dir = "./dog" -snapshot_download( - "diffusers/dog-example", - local_dir=local_dir, repo_type="dataset", - ignore_patterns=".gitattributes", -) -``` - -And launch the training using: - -**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___** - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="dog" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a photo of sks dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --max_train_steps=400 \ - --push_to_hub -``` - -### Training with prior-preservation loss - -Prior-preservation is used to avoid overfitting and language-drift. Refer to the paper to learn more about it. For prior-preservation we first generate images using the model with a class prompt and then use those during training along with our data. -According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation. 200-300 works well for most cases. The `num_class_images` flag sets the number of images to generate with the class prompt. You can place existing images in `class_data_dir`, and the training script will generate any additional images so that `num_class_images` are present in `class_data_dir` during training time. 
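-
-In code terms, the objective adds a prior term weighted by `--prior_loss_weight` to the usual denoising loss. The sketch below is illustrative rather than the script's exact code; `model_pred` and `target` stand in for the stacked instance + class batches:
-
-```python
-import torch
-import torch.nn.functional as F
-
-
-def dreambooth_loss(model_pred, target, prior_loss_weight=1.0):
-    # The instance batch and the class (prior) batch are stacked along dim 0.
-    pred_instance, pred_prior = torch.chunk(model_pred, 2, dim=0)
-    target_instance, target_prior = torch.chunk(target, 2, dim=0)
-    # Usual denoising loss on the subject images ...
-    instance_loss = F.mse_loss(pred_instance.float(), target_instance.float())
-    # ... plus the prior-preservation loss on the generated class images.
-    prior_loss = F.mse_loss(pred_prior.float(), target_prior.float())
-    return instance_loss + prior_loss_weight * prior_loss
-```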
- -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="dog" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 \ - --push_to_hub -``` - - -### Training on a 16GB GPU: - -With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes it's possible to run train dreambooth on a 16GB GPU. - -To install `bitsandbytes` please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation). - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="dog" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=2 --gradient_checkpointing \ - --use_8bit_adam \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 \ - --push_to_hub -``` - - -### Training on a 12GB GPU: - -It is possible to run dreambooth on a 12GB GPU by using the following optimizations: -- [gradient checkpointing and the 8-bit optimizer](#training-on-a-16gb-gpu) -- [xformers](#training-with-xformers) -- [setting grads to none](#set-grads-to-none) - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="dog" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 --gradient_checkpointing \ - --use_8bit_adam \ - --enable_xformers_memory_efficient_attention \ - --set_grads_to_none \ - --learning_rate=2e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 \ - --push_to_hub -``` - - -### Training on a 8 GB GPU: - -By using [DeepSpeed](https://www.deepspeed.ai/) it's possible to offload some -tensors from VRAM to either CPU or NVME allowing to train with less VRAM. - -DeepSpeed needs to be enabled with `accelerate config`. During configuration -answer yes to "Do you want to use DeepSpeed?". With DeepSpeed stage 2, fp16 -mixed precision and offloading both parameters and optimizer state to cpu it's -possible to train on under 8 GB VRAM with a drawback of requiring significantly -more RAM (about 25 GB). 
See [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options. - -Changing the default Adam optimizer to DeepSpeed's special version of Adam -`deepspeed.ops.adam.DeepSpeedCPUAdam` gives a substantial speedup but enabling -it requires CUDA toolchain with the same version as pytorch. 8-bit optimizer -does not seem to be compatible with DeepSpeed at the moment. - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="dog" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch --mixed_precision="fp16" train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --sample_batch_size=1 \ - --gradient_accumulation_steps=1 --gradient_checkpointing \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 \ - --push_to_hub -``` - -### Fine-tune text encoder with the UNet. - -The script also allows to fine-tune the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning `text_encoder` gives much better results especially on faces. -Pass the `--train_text_encoder` argument to the script to enable training `text_encoder`. - -___Note: Training text encoder requires more memory, with this option the training won't fit on 16GB GPU. It needs at least 24GB VRAM.___ - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="dog" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_text_encoder \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --use_8bit_adam \ - --gradient_checkpointing \ - --learning_rate=2e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 \ - --push_to_hub -``` - -### Using DreamBooth for pipelines other than Stable Diffusion - -The [AltDiffusion pipeline](https://huggingface.co/docs/diffusers/api/pipelines/alt_diffusion) also supports dreambooth fine-tuning. The process is the same as above, all you need to do is replace the `MODEL_NAME` like this: - -``` -export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion-m9" -or -export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion" -``` - -### Inference - -Once you have trained a model using the above command, you can run inference simply using the `StableDiffusionPipeline`. Make sure to include the `identifier` (e.g. sks in above example) in your prompt. 
- -```python -from diffusers import StableDiffusionPipeline -import torch - -model_id = "path-to-your-trained-model" -pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") - -prompt = "A photo of sks dog in a bucket" -image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] - -image.save("dog-bucket.png") -``` - -### Inference from a training checkpoint - -You can also perform inference from one of the checkpoints saved during the training process, if you used the `--checkpointing_steps` argument. Please, refer to [the documentation](https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint) to see how to do it. - -## Training with Low-Rank Adaptation of Large Language Models (LoRA) - -Low-Rank Adaption of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen* - -In a nutshell, LoRA allows to adapt pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages: -- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114) -- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable. -- LoRA attention layers allow to control to which extent the model is adapted towards new training images via a `scale` parameter. - -[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in -the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository. - -### Training - -Let's get started with a simple example. We will re-use the dog example of the [previous section](#dog-toy-example). - -First, you need to set-up your dreambooth training example as is explained in the [installation section](#Installing-the-dependencies). -Next, let's download the dog dataset. Download images from [here](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ) and save them in a directory. Make sure to set `INSTANCE_DIR` to the name of your directory further below. This will be our training data. - -Now, you can launch the training. Here we will use [Stable Diffusion 1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5). - -**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___** - -**___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [wandb](https://docs.wandb.ai/quickstart) is a nice solution to easily see generating images during training. All you need to do is to run `pip install wandb` before training and pass `--report_to="wandb"` to automatically log images.___** - - -```bash -export MODEL_NAME="runwayml/stable-diffusion-v1-5" -export INSTANCE_DIR="dog" -export OUTPUT_DIR="path-to-save-model" -``` - -For this example we want to directly store the trained LoRA embeddings on the Hub, so -we need to be logged in and add the `--push_to_hub` flag. - -```bash -huggingface-cli login -``` - -Now we can start training! 
- -```bash -accelerate launch train_dreambooth_lora.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a photo of sks dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ - --checkpointing_steps=100 \ - --learning_rate=1e-4 \ - --report_to="wandb" \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --max_train_steps=500 \ - --validation_prompt="A photo of sks dog in a bucket" \ - --validation_epochs=50 \ - --seed="0" \ - --push_to_hub -``` - -**___Note: When using LoRA we can use a much higher learning rate compared to vanilla dreambooth. Here we -use *1e-4* instead of the usual *2e-6*.___** - -The final LoRA embedding weights have been uploaded to [patrickvonplaten/lora_dreambooth_dog_example](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example). **___Note: [The final weights](https://huggingface.co/patrickvonplaten/lora/blob/main/pytorch_attn_procs.bin) are only 3 MB in size which is orders of magnitudes smaller than the original model.** - -The training results are summarized [here](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5). -You can use the `Step` slider to see how the model learned the features of our subject while the model trained. - -Optionally, we can also train additional LoRA layers for the text encoder. Specify the `--train_text_encoder` argument above for that. If you're interested to know more about how we -enable this support, check out this [PR](https://github.com/huggingface/diffusers/pull/2918). - -With the default hyperparameters from the above, the training seems to go in a positive direction. Check out [this panel](https://wandb.ai/sayakpaul/dreambooth-lora/reports/test-23-04-17-17-00-13---Vmlldzo0MDkwNjMy). The trained LoRA layers are available [here](https://huggingface.co/sayakpaul/dreambooth). - - -### Inference - -After training, LoRA weights can be loaded very easily into the original pipeline. First, you need to -load the original pipeline: - -```python -from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler -import torch - -pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) -pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) -pipe.to("cuda") -``` - -Next, we can load the adapter layers into the UNet with the [`load_attn_procs` function](https://huggingface.co/docs/diffusers/api/loaders#diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs). - -```python -pipe.unet.load_attn_procs("patrickvonplaten/lora_dreambooth_dog_example") -``` - -Finally, we can run the model in inference. - -```python -image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0] -``` - -If you are loading the LoRA parameters from the Hub and if the Hub repository has -a `base_model` tag (such as [this](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example/blob/main/README.md?code=true#L4)), then -you can do: - -```py -from huggingface_hub.repocard import RepoCard - -lora_model_id = "patrickvonplaten/lora_dreambooth_dog_example" -card = RepoCard.load(lora_model_id) -base_model_id = card.data.to_dict()["base_model"] - -pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16) -... -``` - -If you used `--train_text_encoder` during training, then use `pipe.load_lora_weights()` to load the LoRA -weights. 
For example:
-
-```python
-from huggingface_hub.repocard import RepoCard
-from diffusers import StableDiffusionPipeline
-import torch
-
-lora_model_id = "sayakpaul/dreambooth-text-encoder-test"
-card = RepoCard.load(lora_model_id)
-base_model_id = card.data.to_dict()["base_model"]
-
-pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
-pipe = pipe.to("cuda")
-pipe.load_lora_weights(lora_model_id)
-image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
-```
-
-Note that the use of [`LoraLoaderMixin.load_lora_weights`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.load_lora_weights) is preferred to [`UNet2DConditionLoadersMixin.load_attn_procs`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs) for loading LoRA parameters. This is because
-`LoraLoaderMixin.load_lora_weights` can handle the following situations:
-
-* LoRA parameters that don't have separate identifiers for the UNet and the text encoder (such as [`"patrickvonplaten/lora_dreambooth_dog_example"`](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example)). So, you can just do:
-
-  ```py
-  pipe.load_lora_weights(lora_model_path)
-  ```
-
-* LoRA parameters that have separate identifiers for the UNet and the text encoder, such as [`"sayakpaul/dreambooth"`](https://huggingface.co/sayakpaul/dreambooth).
-
-## Training with Flax/JAX
-
-For faster training on TPUs and GPUs, you can leverage the Flax training example. Follow the instructions above to get the model and dataset before running the script.
-
-___Note: The Flax example doesn't yet support features like gradient checkpointing and gradient accumulation, so to use Flax for faster training we will need >30GB cards.___
-
-
-Before running the scripts, make sure to install the library's training dependencies:
-
-```bash
-pip install -U -r requirements_flax.txt
-```
-
-
-### Training without prior preservation loss
-
-```bash
-export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
-export INSTANCE_DIR="dog"
-export OUTPUT_DIR="path-to-save-model"
-
-python train_dreambooth_flax.py \
-  --pretrained_model_name_or_path=$MODEL_NAME \
-  --instance_data_dir=$INSTANCE_DIR \
-  --output_dir=$OUTPUT_DIR \
-  --instance_prompt="a photo of sks dog" \
-  --resolution=512 \
-  --train_batch_size=1 \
-  --learning_rate=5e-6 \
-  --max_train_steps=400
-```
-
-
-### Training with prior preservation loss
-
-```bash
-export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
-export INSTANCE_DIR="dog"
-export CLASS_DIR="path-to-class-images"
-export OUTPUT_DIR="path-to-save-model"
-
-python train_dreambooth_flax.py \
-  --pretrained_model_name_or_path=$MODEL_NAME \
-  --instance_data_dir=$INSTANCE_DIR \
-  --class_data_dir=$CLASS_DIR \
-  --output_dir=$OUTPUT_DIR \
-  --with_prior_preservation --prior_loss_weight=1.0 \
-  --instance_prompt="a photo of sks dog" \
-  --class_prompt="a photo of dog" \
-  --resolution=512 \
-  --train_batch_size=1 \
-  --learning_rate=2e-6 \
-  --num_class_images=200 \
-  --max_train_steps=800
-```
-
-
-### Fine-tune text encoder with the UNet.
- -```bash -export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" -export INSTANCE_DIR="dog" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -python train_dreambooth_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_text_encoder \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --learning_rate=2e-6 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - -### Training with xformers: -You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and padding the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation. - -You can also use Dreambooth to train the specialized in-painting model. See [the script in the research folder for details](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/dreambooth_inpaint). - -### Set grads to none - -To save even more memory, pass the `--set_grads_to_none` argument to the script. This will set grads to None instead of zero. However, be aware that it changes certain behaviors, so if you start experiencing any problems, remove this argument. - -More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html - -### Experimental results -You can refer to [this blog post](https://huggingface.co/blog/dreambooth) that discusses some of DreamBooth experiments in detail. Specifically, it recommends a set of DreamBooth-specific tips and tricks that we have found to work well for a variety of subjects. - -## IF - -You can use the lora and full dreambooth scripts to train the text to image [IF model](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0) and the stage II upscaler -[IF model](https://huggingface.co/DeepFloyd/IF-II-L-v1.0). - -Note that IF has a predicted variance, and our finetuning scripts only train the models predicted error, so for finetuned IF models we switch to a fixed -variance schedule. The full finetuning scripts will update the scheduler config for the full saved model. However, when loading saved LoRA weights, you -must also update the pipeline's scheduler config. - -```py -from diffusers import DiffusionPipeline - -pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0") - -pipe.load_lora_weights("") - -# Update scheduler config to fixed variance schedule -pipe.scheduler = pipe.scheduler.__class__.from_config(pipe.scheduler.config, variance_type="fixed_small") -``` - -Additionally, a few alternative cli flags are needed for IF. - -`--resolution=64`: IF is a pixel space diffusion model. In order to operate on un-compressed pixels, the input images are of a much smaller resolution. - -`--pre_compute_text_embeddings`: IF uses [T5](https://huggingface.co/docs/transformers/model_doc/t5) for its text encoder. In order to save GPU memory, we pre compute all text embeddings and then de-allocate -T5. - -`--tokenizer_max_length=77`: T5 has a longer default text length, but the default IF encoding procedure uses a smaller number. - -`--text_encoder_use_attention_mask`: T5 passes the attention mask to the text encoder. 
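-
-To illustrate what `--pre_compute_text_embeddings` saves, here is a rough sketch of encoding a prompt with T5 once and freeing the encoder afterwards. This is a simplified illustration, not the script's exact code; it assumes the usual diffusers repo layout with `tokenizer` and `text_encoder` subfolders:
-
-```python
-import torch
-from transformers import AutoTokenizer, T5EncoderModel
-
-tokenizer = AutoTokenizer.from_pretrained("DeepFloyd/IF-I-XL-v1.0", subfolder="tokenizer")
-text_encoder = T5EncoderModel.from_pretrained("DeepFloyd/IF-I-XL-v1.0", subfolder="text_encoder")
-
-inputs = tokenizer(
-    "a photo of sks dog",
-    max_length=77,  # matches --tokenizer_max_length
-    padding="max_length",
-    truncation=True,
-    return_tensors="pt",
-)
-with torch.no_grad():
-    # T5 needs the attention mask, hence --text_encoder_use_attention_mask.
-    prompt_embeds = text_encoder(inputs.input_ids, attention_mask=inputs.attention_mask)[0]
-
-# With the embeddings cached, the large T5 encoder can be freed before training.
-del text_encoder
-torch.cuda.empty_cache()
-```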
-
-### Tips and Tricks
-We find LoRA to be sufficient for finetuning the stage I model, as the model's low resolution makes representing fine-grained detail hard regardless.
-
-For common and/or not visually complex object concepts, you can get away with not finetuning the upscaler. Just be sure to adjust the prompt passed to the
-upscaler to remove the new token from the instance prompt. I.e., if your stage I prompt is "a sks dog", use "a dog" for your stage II prompt.
-
-For fine-grained detail like faces that aren't present in the original training set, we find that full finetuning of the stage II upscaler is better than
-LoRA finetuning stage II.
-
-For fine-grained detail like faces, we find that lower learning rates along with larger batch sizes work best.
-
-For stage II, we find that lower learning rates are also needed.
-
-We found experimentally that the DDPM scheduler with the default, larger number of denoising steps sometimes works better than the DPM Solver scheduler
-used in the training scripts.
-
-### Stage II additional validation images
-
-The stage II validation requires images to upscale, so we can download a downsized version of the training set:
-
-```py
-from huggingface_hub import snapshot_download
-
-local_dir = "./dog_downsized"
-snapshot_download(
-    "diffusers/dog-example-downsized",
-    local_dir=local_dir,
-    repo_type="dataset",
-    ignore_patterns=".gitattributes",
-)
-```
-
-### IF stage I LoRA Dreambooth
-This training configuration requires ~28 GB VRAM.
-
-```sh
-export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
-export INSTANCE_DIR="dog"
-export OUTPUT_DIR="dreambooth_dog_lora"
-
-accelerate launch train_dreambooth_lora.py \
-  --report_to wandb \
-  --pretrained_model_name_or_path=$MODEL_NAME \
-  --instance_data_dir=$INSTANCE_DIR \
-  --output_dir=$OUTPUT_DIR \
-  --instance_prompt="a sks dog" \
-  --resolution=64 \
-  --train_batch_size=4 \
-  --gradient_accumulation_steps=1 \
-  --learning_rate=5e-6 \
-  --scale_lr \
-  --max_train_steps=1200 \
-  --validation_prompt="a sks dog" \
-  --validation_epochs=25 \
-  --checkpointing_steps=100 \
-  --pre_compute_text_embeddings \
-  --tokenizer_max_length=77 \
-  --text_encoder_use_attention_mask
-```
-
-### IF stage II LoRA Dreambooth
-
-`--validation_images`: These images are upscaled during validation steps.
-
-`--class_labels_conditioning=timesteps`: Pass additional conditioning to the UNet needed for stage II.
-
-`--learning_rate=1e-6`: Lower learning rate than stage I.
- -`--resolution=256`: The upscaler expects higher resolution inputs - -```sh -export MODEL_NAME="DeepFloyd/IF-II-L-v1.0" -export INSTANCE_DIR="dog" -export OUTPUT_DIR="dreambooth_dog_upscale" -export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png" - -python train_dreambooth_lora.py \ - --report_to wandb \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a sks dog" \ - --resolution=256 \ - --train_batch_size=4 \ - --gradient_accumulation_steps=1 \ - --learning_rate=1e-6 \ - --max_train_steps=2000 \ - --validation_prompt="a sks dog" \ - --validation_epochs=100 \ - --checkpointing_steps=500 \ - --pre_compute_text_embeddings \ - --tokenizer_max_length=77 \ - --text_encoder_use_attention_mask \ - --validation_images $VALIDATION_IMAGES \ - --class_labels_conditioning=timesteps -``` - -### IF Stage I Full Dreambooth -`--skip_save_text_encoder`: When training the full model, this will skip saving the entire T5 with the finetuned model. You can still load the pipeline -with a T5 loaded from the original model. - -`use_8bit_adam`: Due to the size of the optimizer states, we recommend training the full XL IF model with 8bit adam. - -`--learning_rate=1e-7`: For full dreambooth, IF requires very low learning rates. With higher learning rates model quality will degrade. Note that it is -likely the learning rate can be increased with larger batch sizes. - -Using 8bit adam and a batch size of 4, the model can be trained in ~48 GB VRAM. - -```sh -export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0" - -export INSTANCE_DIR="dog" -export OUTPUT_DIR="dreambooth_if" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a photo of sks dog" \ - --resolution=64 \ - --train_batch_size=4 \ - --gradient_accumulation_steps=1 \ - --learning_rate=1e-7 \ - --max_train_steps=150 \ - --validation_prompt "a photo of sks dog" \ - --validation_steps 25 \ - --text_encoder_use_attention_mask \ - --tokenizer_max_length 77 \ - --pre_compute_text_embeddings \ - --use_8bit_adam \ - --set_grads_to_none \ - --skip_save_text_encoder \ - --push_to_hub -``` - -### IF Stage II Full Dreambooth - -`--learning_rate=5e-6`: With a smaller effective batch size of 4, we found that we required learning rates as low as -1e-8. - -`--resolution=256`: The upscaler expects higher resolution inputs - -`--train_batch_size=2` and `--gradient_accumulation_steps=6`: We found that full training of stage II particularly with -faces required large effective batch sizes. 
- -```sh -export MODEL_NAME="DeepFloyd/IF-II-L-v1.0" -export INSTANCE_DIR="dog" -export OUTPUT_DIR="dreambooth_dog_upscale" -export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png" - -accelerate launch train_dreambooth.py \ - --report_to wandb \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a sks dog" \ - --resolution=256 \ - --train_batch_size=2 \ - --gradient_accumulation_steps=6 \ - --learning_rate=5e-6 \ - --max_train_steps=2000 \ - --validation_prompt="a sks dog" \ - --validation_steps=150 \ - --checkpointing_steps=500 \ - --pre_compute_text_embeddings \ - --tokenizer_max_length=77 \ - --text_encoder_use_attention_mask \ - --validation_images $VALIDATION_IMAGES \ - --class_labels_conditioning timesteps \ - --push_to_hub -``` - -## Stable Diffusion XL - -We support fine-tuning of the UNet shipped in [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) with DreamBooth and LoRA via the `train_dreambooth_lora_sdxl.py` script. Please refer to the docs [here](./README_sdxl.md). diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/demodata.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/demodata.py deleted file mode 100644 index feecb693745a47d9f2bebd8af9a217ff4f5cc92b..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/demodata.py +++ /dev/null @@ -1,41 +0,0 @@ -import numpy as np -import torch - -from mmdet.utils.util_random import ensure_rng - - -def random_boxes(num=1, scale=1, rng=None): - """Simple version of ``kwimage.Boxes.random`` - - Returns: - Tensor: shape (n, 4) in x1, y1, x2, y2 format. 
- - References: - https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 - - Example: - >>> num = 3 - >>> scale = 512 - >>> rng = 0 - >>> boxes = random_boxes(num, scale, rng) - >>> print(boxes) - tensor([[280.9925, 278.9802, 308.6148, 366.1769], - [216.9113, 330.6978, 224.0446, 456.5878], - [405.3632, 196.3221, 493.3953, 270.7942]]) - """ - rng = ensure_rng(rng) - - tlbr = rng.rand(num, 4).astype(np.float32) - - tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) - tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) - br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) - br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) - - tlbr[:, 0] = tl_x * scale - tlbr[:, 1] = tl_y * scale - tlbr[:, 2] = br_x * scale - tlbr[:, 3] = br_y * scale - - boxes = torch.from_numpy(tlbr) - return boxes diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/coco.py b/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/coco.py deleted file mode 100644 index 3a8e1bcfdd7f2854ca381d4f87788e3a63eb568c..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/coco.py +++ /dev/null @@ -1,546 +0,0 @@ -import itertools -import logging -import os.path as osp -import tempfile -from collections import OrderedDict - -import mmcv -import numpy as np -import pycocotools -from mmcv.utils import print_log -from pycocotools.coco import COCO -from pycocotools.cocoeval import COCOeval -from terminaltables import AsciiTable - -from mmdet.core import eval_recalls -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class CocoDataset(CustomDataset): - - CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', - 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', - 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', - 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', - 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', - 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', - 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', - 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', - 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', - 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', - 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') - - def load_annotations(self, ann_file): - """Load annotation from COCO style annotation file. - - Args: - ann_file (str): Path of annotation file. - - Returns: - list[dict]: Annotation info from COCO api. - """ - if not getattr(pycocotools, '__version__', '0') >= '12.0.2': - raise AssertionError( - 'Incompatible version of pycocotools is installed. ' - 'Run pip uninstall pycocotools first. 
Then run pip ' - 'install mmpycocotools to install open-mmlab forked ' - 'pycocotools.') - - self.coco = COCO(ann_file) - self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) - self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} - self.img_ids = self.coco.get_img_ids() - data_infos = [] - total_ann_ids = [] - for i in self.img_ids: - info = self.coco.load_imgs([i])[0] - info['filename'] = info['file_name'] - data_infos.append(info) - ann_ids = self.coco.get_ann_ids(img_ids=[i]) - total_ann_ids.extend(ann_ids) - assert len(set(total_ann_ids)) == len( - total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" - return data_infos - - def get_ann_info(self, idx): - """Get COCO annotation by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - - img_id = self.data_infos[idx]['id'] - ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) - ann_info = self.coco.load_anns(ann_ids) - return self._parse_ann_info(self.data_infos[idx], ann_info) - - def get_cat_ids(self, idx): - """Get COCO category ids by index. - - Args: - idx (int): Index of data. - - Returns: - list[int]: All categories in the image of specified index. - """ - - img_id = self.data_infos[idx]['id'] - ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) - ann_info = self.coco.load_anns(ann_ids) - return [ann['category_id'] for ann in ann_info] - - def _filter_imgs(self, min_size=32): - """Filter images too small or without ground truths.""" - valid_inds = [] - # obtain images that contain annotation - ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) - # obtain images that contain annotations of the required categories - ids_in_cat = set() - for i, class_id in enumerate(self.cat_ids): - ids_in_cat |= set(self.coco.cat_img_map[class_id]) - # merge the image id sets of the two conditions and use the merged set - # to filter out images if self.filter_empty_gt=True - ids_in_cat &= ids_with_ann - - valid_img_ids = [] - for i, img_info in enumerate(self.data_infos): - img_id = self.img_ids[i] - if self.filter_empty_gt and img_id not in ids_in_cat: - continue - if min(img_info['width'], img_info['height']) >= min_size: - valid_inds.append(i) - valid_img_ids.append(img_id) - self.img_ids = valid_img_ids - return valid_inds - - def _parse_ann_info(self, img_info, ann_info): - """Parse bbox and mask annotation. - - Args: - ann_info (list[dict]): Annotation info of an image. - with_mask (bool): Whether to parse mask annotations. - - Returns: - dict: A dict containing the following keys: bboxes, bboxes_ignore,\ - labels, masks, seg_map. "masks" are raw annotations and not \ - decoded into binary masks. 
- """ - gt_bboxes = [] - gt_labels = [] - gt_bboxes_ignore = [] - gt_masks_ann = [] - for i, ann in enumerate(ann_info): - if ann.get('ignore', False): - continue - x1, y1, w, h = ann['bbox'] - inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0)) - inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0)) - if inter_w * inter_h == 0: - continue - if ann['area'] <= 0 or w < 1 or h < 1: - continue - if ann['category_id'] not in self.cat_ids: - continue - bbox = [x1, y1, x1 + w, y1 + h] - if ann.get('iscrowd', False): - gt_bboxes_ignore.append(bbox) - else: - gt_bboxes.append(bbox) - gt_labels.append(self.cat2label[ann['category_id']]) - gt_masks_ann.append(ann.get('segmentation', None)) - - if gt_bboxes: - gt_bboxes = np.array(gt_bboxes, dtype=np.float32) - gt_labels = np.array(gt_labels, dtype=np.int64) - else: - gt_bboxes = np.zeros((0, 4), dtype=np.float32) - gt_labels = np.array([], dtype=np.int64) - - if gt_bboxes_ignore: - gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) - else: - gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) - - seg_map = img_info['filename'].replace('jpg', 'png') - - ann = dict( - bboxes=gt_bboxes, - labels=gt_labels, - bboxes_ignore=gt_bboxes_ignore, - masks=gt_masks_ann, - seg_map=seg_map) - - return ann - - def xyxy2xywh(self, bbox): - """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO - evaluation. - - Args: - bbox (numpy.ndarray): The bounding boxes, shape (4, ), in - ``xyxy`` order. - - Returns: - list[float]: The converted bounding boxes, in ``xywh`` order. - """ - - _bbox = bbox.tolist() - return [ - _bbox[0], - _bbox[1], - _bbox[2] - _bbox[0], - _bbox[3] - _bbox[1], - ] - - def _proposal2json(self, results): - """Convert proposal results to COCO json style.""" - json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - bboxes = results[idx] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = 1 - json_results.append(data) - return json_results - - def _det2json(self, results): - """Convert detection results to COCO json style.""" - json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - result = results[idx] - for label in range(len(result)): - bboxes = result[label] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = self.cat_ids[label] - json_results.append(data) - return json_results - - def _segm2json(self, results): - """Convert instance segmentation results to COCO json style.""" - bbox_json_results = [] - segm_json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - det, seg = results[idx] - for label in range(len(det)): - # bbox results - bboxes = det[label] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = self.cat_ids[label] - bbox_json_results.append(data) - - # segm results - # some detectors use different scores for bbox and mask - if isinstance(seg, tuple): - segms = seg[0][label] - mask_score = seg[1][label] - else: - segms = seg[label] - mask_score = [bbox[4] for bbox in bboxes] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = 
float(mask_score[i]) - data['category_id'] = self.cat_ids[label] - if isinstance(segms[i]['counts'], bytes): - segms[i]['counts'] = segms[i]['counts'].decode() - data['segmentation'] = segms[i] - segm_json_results.append(data) - return bbox_json_results, segm_json_results - - def results2json(self, results, outfile_prefix): - """Dump the detection results to a COCO style json file. - - There are 3 types of results: proposals, bbox predictions, mask - predictions, and they have different data types. This method will - automatically recognize the type, and dump them to json files. - - Args: - results (list[list | tuple | ndarray]): Testing results of the - dataset. - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json files will be named - "somepath/xxx.bbox.json", "somepath/xxx.segm.json", - "somepath/xxx.proposal.json". - - Returns: - dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ - values are corresponding filenames. - """ - result_files = dict() - if isinstance(results[0], list): - json_results = self._det2json(results) - result_files['bbox'] = f'{outfile_prefix}.bbox.json' - result_files['proposal'] = f'{outfile_prefix}.bbox.json' - mmcv.dump(json_results, result_files['bbox']) - elif isinstance(results[0], tuple): - json_results = self._segm2json(results) - result_files['bbox'] = f'{outfile_prefix}.bbox.json' - result_files['proposal'] = f'{outfile_prefix}.bbox.json' - result_files['segm'] = f'{outfile_prefix}.segm.json' - mmcv.dump(json_results[0], result_files['bbox']) - mmcv.dump(json_results[1], result_files['segm']) - elif isinstance(results[0], np.ndarray): - json_results = self._proposal2json(results) - result_files['proposal'] = f'{outfile_prefix}.proposal.json' - mmcv.dump(json_results, result_files['proposal']) - else: - raise TypeError('invalid type of results') - return result_files - - def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None): - gt_bboxes = [] - for i in range(len(self.img_ids)): - ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i]) - ann_info = self.coco.load_anns(ann_ids) - if len(ann_info) == 0: - gt_bboxes.append(np.zeros((0, 4))) - continue - bboxes = [] - for ann in ann_info: - if ann.get('ignore', False) or ann['iscrowd']: - continue - x1, y1, w, h = ann['bbox'] - bboxes.append([x1, y1, x1 + w, y1 + h]) - bboxes = np.array(bboxes, dtype=np.float32) - if bboxes.shape[0] == 0: - bboxes = np.zeros((0, 4)) - gt_bboxes.append(bboxes) - - recalls = eval_recalls( - gt_bboxes, results, proposal_nums, iou_thrs, logger=logger) - ar = recalls.mean(axis=1) - return ar - - def format_results(self, results, jsonfile_prefix=None, **kwargs): - """Format the results to json (standard format for COCO evaluation). - - Args: - results (list[tuple | numpy.ndarray]): Testing results of the - dataset. - jsonfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Default: None. - - Returns: - tuple: (result_files, tmp_dir), result_files is a dict containing \ - the json filepaths, tmp_dir is the temporal directory created \ - for saving json files when jsonfile_prefix is not specified. - """ - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: {} != {}'. 
format(len(results), len(self))) - - if jsonfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - jsonfile_prefix = osp.join(tmp_dir.name, 'results') - else: - tmp_dir = None - result_files = self.results2json(results, jsonfile_prefix) - return result_files, tmp_dir - - def evaluate(self, - results, - metric='bbox', - logger=None, - jsonfile_prefix=None, - classwise=False, - proposal_nums=(100, 300, 1000), - iou_thrs=None, - metric_items=None): - """Evaluation in COCO protocol. - - Args: - results (list[list | tuple]): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. Options are - 'bbox', 'segm', 'proposal', 'proposal_fast'. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - jsonfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Default: None. - classwise (bool): Whether to evaluate the AP for each class. - proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thrs (Sequence[float], optional): IoU threshold used for - evaluating recalls/mAPs. If set to a list, the average of all - IoUs will also be computed. If not specified, [0.50, 0.55, - 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. - Default: None. - metric_items (list[str] | str, optional): Metric items that will - be returned. If not specified, ``['AR@100', 'AR@300', - 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be - used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', - 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when - ``metric=='bbox' or metric=='segm'``. - - Returns: - dict[str, float]: COCO style evaluation metric. - """ - - metrics = metric if isinstance(metric, list) else [metric] - allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] - for metric in metrics: - if metric not in allowed_metrics: - raise KeyError(f'metric {metric} is not supported') - if iou_thrs is None: - iou_thrs = np.linspace( - .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) - if metric_items is not None: - if not isinstance(metric_items, list): - metric_items = [metric_items] - - result_files, tmp_dir = self.format_results(results, jsonfile_prefix) - - eval_results = OrderedDict() - cocoGt = self.coco - for metric in metrics: - msg = f'Evaluating {metric}...' 
- if logger is None: - msg = '\n' + msg - print_log(msg, logger=logger) - - if metric == 'proposal_fast': - ar = self.fast_eval_recall( - results, proposal_nums, iou_thrs, logger='silent') - log_msg = [] - for i, num in enumerate(proposal_nums): - eval_results[f'AR@{num}'] = ar[i] - log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') - log_msg = ''.join(log_msg) - print_log(log_msg, logger=logger) - continue - - if metric not in result_files: - raise KeyError(f'{metric} is not in results') - try: - cocoDt = cocoGt.loadRes(result_files[metric]) - except IndexError: - print_log( - 'The testing results of the whole dataset is empty.', - logger=logger, - level=logging.ERROR) - break - - iou_type = 'bbox' if metric == 'proposal' else metric - cocoEval = COCOeval(cocoGt, cocoDt, iou_type) - cocoEval.params.catIds = self.cat_ids - cocoEval.params.imgIds = self.img_ids - cocoEval.params.maxDets = list(proposal_nums) - cocoEval.params.iouThrs = iou_thrs - # mapping of cocoEval.stats - coco_metric_names = { - 'mAP': 0, - 'mAP_50': 1, - 'mAP_75': 2, - 'mAP_s': 3, - 'mAP_m': 4, - 'mAP_l': 5, - 'AR@100': 6, - 'AR@300': 7, - 'AR@1000': 8, - 'AR_s@1000': 9, - 'AR_m@1000': 10, - 'AR_l@1000': 11 - } - if metric_items is not None: - for metric_item in metric_items: - if metric_item not in coco_metric_names: - raise KeyError( - f'metric item {metric_item} is not supported') - - if metric == 'proposal': - cocoEval.params.useCats = 0 - cocoEval.evaluate() - cocoEval.accumulate() - cocoEval.summarize() - if metric_items is None: - metric_items = [ - 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', - 'AR_m@1000', 'AR_l@1000' - ] - - for item in metric_items: - val = float( - f'{cocoEval.stats[coco_metric_names[item]]:.3f}') - eval_results[item] = val - else: - cocoEval.evaluate() - cocoEval.accumulate() - cocoEval.summarize() - if classwise: # Compute per-category AP - # Compute per-category AP - # from https://github.com/facebookresearch/detectron2/ - precisions = cocoEval.eval['precision'] - # precision: (iou, recall, cls, area range, max dets) - assert len(self.cat_ids) == precisions.shape[2] - - results_per_category = [] - for idx, catId in enumerate(self.cat_ids): - # area range index 0: all area ranges - # max dets index -1: typically 100 per image - nm = self.coco.loadCats(catId)[0] - precision = precisions[:, :, idx, 0, -1] - precision = precision[precision > -1] - if precision.size: - ap = np.mean(precision) - else: - ap = float('nan') - results_per_category.append( - (f'{nm["name"]}', f'{float(ap):0.3f}')) - - num_columns = min(6, len(results_per_category) * 2) - results_flatten = list( - itertools.chain(*results_per_category)) - headers = ['category', 'AP'] * (num_columns // 2) - results_2d = itertools.zip_longest(*[ - results_flatten[i::num_columns] - for i in range(num_columns) - ]) - table_data = [headers] - table_data += [result for result in results_2d] - table = AsciiTable(table_data) - print_log('\n' + table.table, logger=logger) - - if metric_items is None: - metric_items = [ - 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l' - ] - - for metric_item in metric_items: - key = f'{metric}_{metric_item}' - val = float( - f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}' - ) - eval_results[key] = val - ap = cocoEval.stats[:6] - eval_results[f'{metric}_mAP_copypaste'] = ( - f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' - f'{ap[4]:.3f} {ap[5]:.3f}') - if tmp_dir is not None: - tmp_dir.cleanup() - return eval_results diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/custom.py 
b/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/custom.py deleted file mode 100644 index 1a2351c217f43d32178053dfc682a2b241f9a3f1..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/custom.py +++ /dev/null @@ -1,323 +0,0 @@ -import os.path as osp -import warnings -from collections import OrderedDict - -import mmcv -import numpy as np -from mmcv.utils import print_log -from torch.utils.data import Dataset - -from mmdet.core import eval_map, eval_recalls -from .builder import DATASETS -from .pipelines import Compose - - -@DATASETS.register_module() -class CustomDataset(Dataset): - """Custom dataset for detection. - - The annotation format is shown as follows. The `ann` field is optional for - testing. - - .. code-block:: none - - [ - { - 'filename': 'a.jpg', - 'width': 1280, - 'height': 720, - 'ann': { - 'bboxes': (n, 4) in (x1, y1, x2, y2) order. - 'labels': (n, ), - 'bboxes_ignore': (k, 4), (optional field) - 'labels_ignore': (k, 4) (optional field) - } - }, - ... - ] - - Args: - ann_file (str): Annotation file path. - pipeline (list[dict]): Processing pipeline. - classes (str | Sequence[str], optional): Specify classes to load. - If is None, ``cls.CLASSES`` will be used. Default: None. - data_root (str, optional): Data root for ``ann_file``, - ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified. - test_mode (bool, optional): If set True, annotation will not be loaded. - filter_empty_gt (bool, optional): If set true, images without bounding - boxes of the dataset's classes will be filtered out. This option - only works when `test_mode=False`, i.e., we never filter images - during tests. - """ - - CLASSES = None - - def __init__(self, - ann_file, - pipeline, - classes=None, - data_root=None, - img_prefix='', - seg_prefix=None, - proposal_file=None, - test_mode=False, - filter_empty_gt=True): - self.ann_file = ann_file - self.data_root = data_root - self.img_prefix = img_prefix - self.seg_prefix = seg_prefix - self.proposal_file = proposal_file - self.test_mode = test_mode - self.filter_empty_gt = filter_empty_gt - self.CLASSES = self.get_classes(classes) - - # join paths if data_root is specified - if self.data_root is not None: - if not osp.isabs(self.ann_file): - self.ann_file = osp.join(self.data_root, self.ann_file) - if not (self.img_prefix is None or osp.isabs(self.img_prefix)): - self.img_prefix = osp.join(self.data_root, self.img_prefix) - if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)): - self.seg_prefix = osp.join(self.data_root, self.seg_prefix) - if not (self.proposal_file is None - or osp.isabs(self.proposal_file)): - self.proposal_file = osp.join(self.data_root, - self.proposal_file) - # load annotations (and proposals) - self.data_infos = self.load_annotations(self.ann_file) - - if self.proposal_file is not None: - self.proposals = self.load_proposals(self.proposal_file) - else: - self.proposals = None - - # filter images too small and containing no annotations - if not test_mode: - valid_inds = self._filter_imgs() - self.data_infos = [self.data_infos[i] for i in valid_inds] - if self.proposals is not None: - self.proposals = [self.proposals[i] for i in valid_inds] - # set group flag for the sampler - self._set_group_flag() - - # processing pipeline - self.pipeline = Compose(pipeline) - - def __len__(self): - """Total number of samples of data.""" - return len(self.data_infos) - - def load_annotations(self, ann_file): - """Load annotation from annotation file.""" - return 
mmcv.load(ann_file) - - def load_proposals(self, proposal_file): - """Load proposal from proposal file.""" - return mmcv.load(proposal_file) - - def get_ann_info(self, idx): - """Get annotation by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - - return self.data_infos[idx]['ann'] - - def get_cat_ids(self, idx): - """Get category ids by index. - - Args: - idx (int): Index of data. - - Returns: - list[int]: All categories in the image of specified index. - """ - - return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist() - - def pre_pipeline(self, results): - """Prepare results dict for pipeline.""" - results['img_prefix'] = self.img_prefix - results['seg_prefix'] = self.seg_prefix - results['proposal_file'] = self.proposal_file - results['bbox_fields'] = [] - results['mask_fields'] = [] - results['seg_fields'] = [] - - def _filter_imgs(self, min_size=32): - """Filter images too small.""" - if self.filter_empty_gt: - warnings.warn( - 'CustomDataset does not support filtering empty gt images.') - valid_inds = [] - for i, img_info in enumerate(self.data_infos): - if min(img_info['width'], img_info['height']) >= min_size: - valid_inds.append(i) - return valid_inds - - def _set_group_flag(self): - """Set flag according to image aspect ratio. - - Images with aspect ratio greater than 1 will be set as group 1, - otherwise group 0. - """ - self.flag = np.zeros(len(self), dtype=np.uint8) - for i in range(len(self)): - img_info = self.data_infos[i] - if img_info['width'] / img_info['height'] > 1: - self.flag[i] = 1 - - def _rand_another(self, idx): - """Get another random index from the same group as the given index.""" - pool = np.where(self.flag == self.flag[idx])[0] - return np.random.choice(pool) - - def __getitem__(self, idx): - """Get training/test data after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Training/test data (with annotation if `test_mode` is set \ - True). - """ - - if self.test_mode: - return self.prepare_test_img(idx) - while True: - data = self.prepare_train_img(idx) - if data is None: - idx = self._rand_another(idx) - continue - return data - - def prepare_train_img(self, idx): - """Get training data and annotations after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Training data and annotation after pipeline with new keys \ - introduced by pipeline. - """ - - img_info = self.data_infos[idx] - ann_info = self.get_ann_info(idx) - results = dict(img_info=img_info, ann_info=ann_info) - if self.proposals is not None: - results['proposals'] = self.proposals[idx] - self.pre_pipeline(results) - return self.pipeline(results) - - def prepare_test_img(self, idx): - """Get testing data after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Testing data after pipeline with new keys introduced by \ - pipeline. - """ - - img_info = self.data_infos[idx] - results = dict(img_info=img_info) - if self.proposals is not None: - results['proposals'] = self.proposals[idx] - self.pre_pipeline(results) - return self.pipeline(results) - - @classmethod - def get_classes(cls, classes=None): - """Get class names of current dataset. - - Args: - classes (Sequence[str] | str | None): If classes is None, use - default CLASSES defined by builtin dataset. If classes is a - string, take it as a file name. The file contains the name of - classes where each line contains one class name. If classes is - a tuple or list, override the CLASSES defined by the dataset. 
- - Returns: - tuple[str] or list[str]: Names of categories of the dataset. - """ - if classes is None: - return cls.CLASSES - - if isinstance(classes, str): - # take it as a file path - class_names = mmcv.list_from_file(classes) - elif isinstance(classes, (tuple, list)): - class_names = classes - else: - raise ValueError(f'Unsupported type {type(classes)} of classes.') - - return class_names - - def format_results(self, results, **kwargs): - """Place holder to format result to dataset specific output.""" - - def evaluate(self, - results, - metric='mAP', - logger=None, - proposal_nums=(100, 300, 1000), - iou_thr=0.5, - scale_ranges=None): - """Evaluate the dataset. - - Args: - results (list): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. - logger (logging.Logger | None | str): Logger used for printing - related information during evaluation. Default: None. - proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thr (float | list[float]): IoU threshold. Default: 0.5. - scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP. - Default: None. - """ - - if not isinstance(metric, str): - assert len(metric) == 1 - metric = metric[0] - allowed_metrics = ['mAP', 'recall'] - if metric not in allowed_metrics: - raise KeyError(f'metric {metric} is not supported') - annotations = [self.get_ann_info(i) for i in range(len(self))] - eval_results = OrderedDict() - iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr - if metric == 'mAP': - assert isinstance(iou_thrs, list) - mean_aps = [] - for iou_thr in iou_thrs: - print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}') - mean_ap, _ = eval_map( - results, - annotations, - scale_ranges=scale_ranges, - iou_thr=iou_thr, - dataset=self.CLASSES, - logger=logger) - mean_aps.append(mean_ap) - eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) - eval_results['mAP'] = sum(mean_aps) / len(mean_aps) - elif metric == 'recall': - gt_bboxes = [ann['bboxes'] for ann in annotations] - recalls = eval_recalls( - gt_bboxes, results, proposal_nums, iou_thr, logger=logger) - for i, num in enumerate(proposal_nums): - for j, iou in enumerate(iou_thrs): - eval_results[f'recall@{num}@{iou}'] = recalls[i, j] - if recalls.shape[1] > 1: - ar = recalls.mean(axis=1) - for i, num in enumerate(proposal_nums): - eval_results[f'AR@{num}'] = ar[i] - return eval_results diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/github.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/github.py deleted file mode 100644 index 282267b6be7f3b0371a3fd332f98e38611c9fb9a..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/github.py +++ /dev/null @@ -1,38 +0,0 @@ -import subprocess -from pathlib import Path - -new_extensions = set() - - -def clone_or_pull_repository(github_url): - global new_extensions - - repository_folder = Path("extensions") - repo_name = github_url.rstrip("/").split("/")[-1].split(".")[0] - - # Check if the repository folder exists - if not repository_folder.exists(): - repository_folder.mkdir(parents=True) - - repo_path = repository_folder / repo_name - - # Check if the repository is already cloned - if repo_path.exists(): - yield f"Updating {github_url}..." 
- # Perform a 'git pull' to update the repository - try: - pull_output = subprocess.check_output(["git", "-C", repo_path, "pull"], stderr=subprocess.STDOUT) - yield "Done." - return pull_output.decode() - except subprocess.CalledProcessError as e: - return str(e) - - # Clone the repository - try: - yield f"Cloning {github_url}..." - clone_output = subprocess.check_output(["git", "clone", github_url, repo_path], stderr=subprocess.STDOUT) - new_extensions.add(repo_name) - yield f"The extension `{repo_name}` has been downloaded.\n\nPlease close the web UI completely and launch it again to be able to load it." - return clone_output.decode() - except subprocess.CalledProcessError as e: - return str(e) diff --git a/spaces/Anonymous-123/ImageNet-Editing/app.py b/spaces/Anonymous-123/ImageNet-Editing/app.py deleted file mode 100644 index 6a0c9691f3338d0a20157ed7d5bf32b76b3e6cf6..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/app.py +++ /dev/null @@ -1,102 +0,0 @@ -import os -import gradio as gr -import sys -sys.path.append(".") - -#@title Import stuff -import gc - -import subprocess -import shutil -from PIL import Image -import time - -import imageio - - -# def run(initial_image, mask, Backgrounds, Backgrounds_complexity, Size, Angle, Steps, num_of_Images): -def run(source_img, Backgrounds, Backgrounds_complexity, Size, Angle, Steps, num_of_Images): - print('-------------------starting to process-------------------') - if os.path.exists('results'): - shutil.rmtree("results") - if os.path.exists('tmp'): - shutil.rmtree("tmp") - time.sleep(1) - os.makedirs('results', exist_ok=True) - os.makedirs('tmp/img', exist_ok=True) - os.makedirs('tmp/mask', exist_ok=True) - os.makedirs('tmp/bg', exist_ok=True) - - ''' - print('-----initial_image: ', initial_image) - init_image = Image.open(initial_image) - mask = Image.open(mask) - init_image = init_image.resize((256,256)) - mask = mask.resize((256,256)) - init_image.save("tmp/img/input.JPEG") - mask.save("tmp/mask/input.png") - ''' - imageio.imwrite("tmp/img/input.JPEG", source_img["image"]) - imageio.imwrite("tmp/mask/input.png", source_img["mask"]) - - initial_image = Image.open('tmp/img/input.JPEG').resize((256,256)) - initial_image.save('tmp/img/input.JPEG') - mask = Image.open('tmp/mask/input.png').resize((256,256)) - mask.save('tmp/mask/input.png') - - - if Backgrounds: - background_specific = Backgrounds - if background_specific is not None: - background_specific = Image.open(background_specific).convert('RGB') # Specified background - background_specific = background_specific.resize((256,256)) - background_specific.save('tmp/bg/bg.png') - background_specific = '../tmp/bg/bg.png' - else: - background_specific = "" - - Backgrounds_complexity = Backgrounds_complexity - Size = Size - Angle = Angle - Steps = Steps - num_of_Images = num_of_Images - print(Backgrounds_complexity, background_specific, Size, Angle, Steps, num_of_Images) - p = subprocess.Popen(["sh", "run.sh", str(Backgrounds_complexity), background_specific, str(Size), str(Angle), str(Steps), str(num_of_Images)]) - - # subprocess.Popen(["cd", "object_removal/TFill/"]) - # subprocess.Popen(["python", "test.py"]) - - return_code = p.wait() - print('----return_code: ', return_code) - - if os.path.exists('results/edited.png'): - return Image.open('results/edited.png') - else: - return Image.open('tmp/img/input.JPEG') - - -image = gr.outputs.Image(type="pil", label="Your result") -css = ".output-image{height: 528px !important} .output-carousel 
.output-image{height:272px !important} a{text-decoration: underline}" -iface = gr.Interface(fn=run, inputs=[ - # gr.inputs.Image(type="filepath", label='initial_image'), - gr.Image(source="upload", type="numpy", tool="sketch", elem_id="source_container"), - # gr.inputs.Image(type="filepath", label='mask - object mask', optional=True), - gr.inputs.Image(type="filepath", label='Backgrounds - optional, specified backgrounds'), - gr.inputs.Slider(label="Backgrounds_complexity - How complicated you wish the generated image to be", default=0, step=1, minimum=-30, maximum=30), - gr.inputs.Slider(label="Size - Object pixel rates", default=0.1, step=0.02, minimum=0.01, maximum=0.5), - gr.inputs.Slider(label="Angle - Object angle", default=0, step=10, minimum=-180, maximum=180), - gr.inputs.Slider(label="Steps - more steps can increase quality but will take longer to generate",default=10,maximum=100,minimum=1,step=1), - gr.inputs.Slider(label="num_of_Images - How many images you wish to generate", default=2, step=1, minimum=1, maximum=4), - - # gr.inputs.Radio(label="Width", choices=[32,64,128,256],default=256), - # gr.inputs.Radio(label="Height", choices=[32,64,128,256],default=256), - # gr.inputs.Textbox(label="Prompt - try adding increments to your prompt such as 'oil on canvas', 'a painting', 'a book cover'",default="chalk pastel drawing of a dog wearing a funny hat"), - #gr.inputs.Slider(label="ETA - between 0 and 1. Lower values can provide better quality, higher values can be more diverse",default=0.0,minimum=0.0, maximum=1.0,step=0.1), - ], - # outputs=[image,gr.outputs.Carousel(label="Individual images",components=["image"]),gr.outputs.Textbox(label="Error")], - outputs=["image"], - css=css, - title="Image Editing with Controls of Object Attributes including Backgrounds, Sizes, Positions and Directions", - description="Demo for Image Editing with Controls of Object Attributes. *** NOTE!!! Due to its GPU requirements, this demo currently cannot run on this website (it always returns the input image). Please download the code and run it on your own server. ***", - article="Our code is mostly developed based on the code of `Blended Diffusion for Text-driven Editing of Natural Images' and `TFill'") -iface.launch(enable_queue=True) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/download.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/download.py deleted file mode 100644 index 36e947c8c052dc48b2600c29575f5607a5144d5a..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/download.py +++ /dev/null @@ -1,143 +0,0 @@ -import logging -import os -from optparse import Values -from typing import List - -from pip._internal.cli import cmdoptions -from pip._internal.cli.cmdoptions import make_target_python -from pip._internal.cli.req_command import RequirementCommand, with_cleanup -from pip._internal.cli.status_codes import SUCCESS -from pip._internal.operations.build.build_tracker import get_build_tracker -from pip._internal.req.req_install import check_legacy_setup_py_options -from pip._internal.utils.misc import ensure_dir, normalize_path, write_output -from pip._internal.utils.temp_dir import TempDirectory - -logger = logging.getLogger(__name__) - - -class DownloadCommand(RequirementCommand): - """ - Download packages from: - - - PyPI (and other indexes) using requirement specifiers. 
- - VCS project urls. - - Local project directories. - - Local or remote source archives. - - pip also supports downloading from "requirements files", which provide - an easy way to specify a whole environment to be downloaded. - """ - - usage = """ - %prog [options] [package-index-options] ... - %prog [options] -r [package-index-options] ... - %prog [options] ... - %prog [options] ... - %prog [options] ...""" - - def add_options(self) -> None: - self.cmd_opts.add_option(cmdoptions.constraints()) - self.cmd_opts.add_option(cmdoptions.requirements()) - self.cmd_opts.add_option(cmdoptions.no_deps()) - self.cmd_opts.add_option(cmdoptions.global_options()) - self.cmd_opts.add_option(cmdoptions.no_binary()) - self.cmd_opts.add_option(cmdoptions.only_binary()) - self.cmd_opts.add_option(cmdoptions.prefer_binary()) - self.cmd_opts.add_option(cmdoptions.src()) - self.cmd_opts.add_option(cmdoptions.pre()) - self.cmd_opts.add_option(cmdoptions.require_hashes()) - self.cmd_opts.add_option(cmdoptions.progress_bar()) - self.cmd_opts.add_option(cmdoptions.no_build_isolation()) - self.cmd_opts.add_option(cmdoptions.use_pep517()) - self.cmd_opts.add_option(cmdoptions.no_use_pep517()) - self.cmd_opts.add_option(cmdoptions.check_build_deps()) - self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) - - self.cmd_opts.add_option( - "-d", - "--dest", - "--destination-dir", - "--destination-directory", - dest="download_dir", - metavar="dir", - default=os.curdir, - help="Download packages into .", - ) - - cmdoptions.add_target_python_options(self.cmd_opts) - - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, self.cmd_opts) - - @with_cleanup - def run(self, options: Values, args: List[str]) -> int: - options.ignore_installed = True - # editable doesn't really make sense for `pip download`, but the bowels - # of the RequirementSet code require that property. 
- options.editables = [] - - cmdoptions.check_dist_restriction(options) - - options.download_dir = normalize_path(options.download_dir) - ensure_dir(options.download_dir) - - session = self.get_default_session(options) - - target_python = make_target_python(options) - finder = self._build_package_finder( - options=options, - session=session, - target_python=target_python, - ignore_requires_python=options.ignore_requires_python, - ) - - build_tracker = self.enter_context(get_build_tracker()) - - directory = TempDirectory( - delete=not options.no_clean, - kind="download", - globally_managed=True, - ) - - reqs = self.get_requirements(args, options, finder, session) - check_legacy_setup_py_options(options, reqs) - - preparer = self.make_requirement_preparer( - temp_build_dir=directory, - options=options, - build_tracker=build_tracker, - session=session, - finder=finder, - download_dir=options.download_dir, - use_user_site=False, - verbosity=self.verbosity, - ) - - resolver = self.make_resolver( - preparer=preparer, - finder=finder, - options=options, - ignore_requires_python=options.ignore_requires_python, - use_pep517=options.use_pep517, - py_version_info=options.python_version, - ) - - self.trace_basic_info(finder) - - requirement_set = resolver.resolve(reqs, check_supported_wheels=True) - - downloaded: List[str] = [] - for req in requirement_set.requirements.values(): - if req.satisfied_by is None: - assert req.name is not None - preparer.save_linked_requirement(req) - downloaded.append(req.name) - if downloaded: - write_output("Successfully downloaded %s", " ".join(downloaded)) - - return SUCCESS diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/configuration.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/configuration.py deleted file mode 100644 index 8fd46c9b8e0dbab8716c33d349f55a494613ba8f..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/configuration.py +++ /dev/null @@ -1,374 +0,0 @@ -"""Configuration management setup - -Some terminology: -- name - As written in config files. -- value - Value associated with a name -- key - Name combined with it's section (section.name) -- variant - A single word describing where the configuration key-value pair came from -""" - -import configparser -import locale -import os -import sys -from typing import Any, Dict, Iterable, List, NewType, Optional, Tuple - -from pip._internal.exceptions import ( - ConfigurationError, - ConfigurationFileCouldNotBeLoaded, -) -from pip._internal.utils import appdirs -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.logging import getLogger -from pip._internal.utils.misc import ensure_dir, enum - -RawConfigParser = configparser.RawConfigParser # Shorthand -Kind = NewType("Kind", str) - -CONFIG_BASENAME = "pip.ini" if WINDOWS else "pip.conf" -ENV_NAMES_IGNORED = "version", "help" - -# The kinds of configurations there are. -kinds = enum( - USER="user", # User Specific - GLOBAL="global", # System Wide - SITE="site", # [Virtual] Environment Specific - ENV="env", # from PIP_CONFIG_FILE - ENV_VAR="env-var", # from Environment Variables -) -OVERRIDE_ORDER = kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR -VALID_LOAD_ONLY = kinds.USER, kinds.GLOBAL, kinds.SITE - -logger = getLogger(__name__) - - -# NOTE: Maybe use the optionx attribute to normalize keynames. 
-def _normalize_name(name: str) -> str: - """Make a name consistent regardless of source (environment or file)""" - name = name.lower().replace("_", "-") - if name.startswith("--"): - name = name[2:] # only prefer long opts - return name - - -def _disassemble_key(name: str) -> List[str]: - if "." not in name: - error_message = ( - "Key does not contain dot separated section and key. " - "Perhaps you wanted to use 'global.{}' instead?" - ).format(name) - raise ConfigurationError(error_message) - return name.split(".", 1) - - -def get_configuration_files() -> Dict[Kind, List[str]]: - global_config_files = [ - os.path.join(path, CONFIG_BASENAME) for path in appdirs.site_config_dirs("pip") - ] - - site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME) - legacy_config_file = os.path.join( - os.path.expanduser("~"), - "pip" if WINDOWS else ".pip", - CONFIG_BASENAME, - ) - new_config_file = os.path.join(appdirs.user_config_dir("pip"), CONFIG_BASENAME) - return { - kinds.GLOBAL: global_config_files, - kinds.SITE: [site_config_file], - kinds.USER: [legacy_config_file, new_config_file], - } - - -class Configuration: - """Handles management of configuration. - - Provides an interface to accessing and managing configuration files. - - This class provides an API that takes "section.key-name" style - keys and stores the value associated with it as "key-name" under the - section "section". - - This allows for a clean interface wherein both the section and the - key-name are preserved in an easy to manage form in the configuration files - and the data stored is also nice. - """ - - def __init__(self, isolated: bool, load_only: Optional[Kind] = None) -> None: - super().__init__() - - if load_only is not None and load_only not in VALID_LOAD_ONLY: - raise ConfigurationError( - "Got invalid value for load_only - should be one of {}".format( - ", ".join(map(repr, VALID_LOAD_ONLY)) - ) - ) - self.isolated = isolated - self.load_only = load_only - - # Because we keep track of where we got the data from - self._parsers: Dict[Kind, List[Tuple[str, RawConfigParser]]] = { - variant: [] for variant in OVERRIDE_ORDER - } - self._config: Dict[Kind, Dict[str, Any]] = { - variant: {} for variant in OVERRIDE_ORDER - } - self._modified_parsers: List[Tuple[str, RawConfigParser]] = [] - - def load(self) -> None: - """Loads configuration from configuration files and environment""" - self._load_config_files() - if not self.isolated: - self._load_environment_vars() - - def get_file_to_edit(self) -> Optional[str]: - """Returns the file with highest priority in configuration""" - assert self.load_only is not None, "Need to specify a file to edit" - - try: - return self._get_parser_to_modify()[0] - except IndexError: - return None - - def items(self) -> Iterable[Tuple[str, Any]]: - """Returns key-value pairs like dict.items() representing the loaded - configuration - """ - return self._dictionary.items() - - def get_value(self, key: str) -> Any: - """Get a value from the configuration.""" - orig_key = key - key = _normalize_name(key) - try: - return self._dictionary[key] - except KeyError: - # disassembling triggers a more useful error message than simply - # "No such key" in the case that the key isn't in the form command.option - _disassemble_key(key) - raise ConfigurationError(f"No such key - {orig_key}") - - def set_value(self, key: str, value: Any) -> None: - """Modify a value in the configuration.""" - key = _normalize_name(key) - self._ensure_have_load_only() - - assert self.load_only - fname, 
parser = self._get_parser_to_modify() - - if parser is not None: - section, name = _disassemble_key(key) - - # Modify the parser and the configuration - if not parser.has_section(section): - parser.add_section(section) - parser.set(section, name, value) - - self._config[self.load_only][key] = value - self._mark_as_modified(fname, parser) - - def unset_value(self, key: str) -> None: - """Unset a value in the configuration.""" - orig_key = key - key = _normalize_name(key) - self._ensure_have_load_only() - - assert self.load_only - if key not in self._config[self.load_only]: - raise ConfigurationError(f"No such key - {orig_key}") - - fname, parser = self._get_parser_to_modify() - - if parser is not None: - section, name = _disassemble_key(key) - if not ( - parser.has_section(section) and parser.remove_option(section, name) - ): - # The option was not removed. - raise ConfigurationError( - "Fatal Internal error [id=1]. Please report as a bug." - ) - - # The section may be empty after the option was removed. - if not parser.items(section): - parser.remove_section(section) - self._mark_as_modified(fname, parser) - - del self._config[self.load_only][key] - - def save(self) -> None: - """Save the current in-memory state.""" - self._ensure_have_load_only() - - for fname, parser in self._modified_parsers: - logger.info("Writing to %s", fname) - - # Ensure directory exists. - ensure_dir(os.path.dirname(fname)) - - with open(fname, "w") as f: - parser.write(f) - - # - # Private routines - # - - def _ensure_have_load_only(self) -> None: - if self.load_only is None: - raise ConfigurationError("Needed a specific file to be modifying.") - logger.debug("Will be working with %s variant only", self.load_only) - - @property - def _dictionary(self) -> Dict[str, Any]: - """A dictionary representing the loaded configuration.""" - # NOTE: Dictionaries are not populated if not loaded. So, conditionals - # are not needed here. - retval = {} - - for variant in OVERRIDE_ORDER: - retval.update(self._config[variant]) - - return retval - - def _load_config_files(self) -> None: - """Loads configuration from configuration files""" - config_files = dict(self.iter_config_files()) - if config_files[kinds.ENV][0:1] == [os.devnull]: - logger.debug( - "Skipping loading configuration files due to " - "environment's PIP_CONFIG_FILE being os.devnull" - ) - return - - for variant, files in config_files.items(): - for fname in files: - # If there's specific variant set in `load_only`, load only - # that variant, not the others. - if self.load_only is not None and variant != self.load_only: - logger.debug("Skipping file '%s' (variant: %s)", fname, variant) - continue - - parser = self._load_file(variant, fname) - - # Keeping track of the parsers used - self._parsers[variant].append((fname, parser)) - - def _load_file(self, variant: Kind, fname: str) -> RawConfigParser: - logger.verbose("For variant '%s', will try loading '%s'", variant, fname) - parser = self._construct_parser(fname) - - for section in parser.sections(): - items = parser.items(section) - self._config[variant].update(self._normalized_keys(section, items)) - - return parser - - def _construct_parser(self, fname: str) -> RawConfigParser: - parser = configparser.RawConfigParser() - # If there is no such file, don't bother reading it but create the - # parser anyway, to hold the data. - # Doing this is useful when modifying and saving files, where we don't - # need to construct a parser. 
- if os.path.exists(fname): - locale_encoding = locale.getpreferredencoding(False) - try: - parser.read(fname, encoding=locale_encoding) - except UnicodeDecodeError: - # See https://github.com/pypa/pip/issues/4963 - raise ConfigurationFileCouldNotBeLoaded( - reason=f"contains invalid {locale_encoding} characters", - fname=fname, - ) - except configparser.Error as error: - # See https://github.com/pypa/pip/issues/4893 - raise ConfigurationFileCouldNotBeLoaded(error=error) - return parser - - def _load_environment_vars(self) -> None: - """Loads configuration from environment variables""" - self._config[kinds.ENV_VAR].update( - self._normalized_keys(":env:", self.get_environ_vars()) - ) - - def _normalized_keys( - self, section: str, items: Iterable[Tuple[str, Any]] - ) -> Dict[str, Any]: - """Normalizes items to construct a dictionary with normalized keys. - - This routine is where the names become keys and are made the same - regardless of source - configuration files or environment. - """ - normalized = {} - for name, val in items: - key = section + "." + _normalize_name(name) - normalized[key] = val - return normalized - - def get_environ_vars(self) -> Iterable[Tuple[str, str]]: - """Returns a generator with all environmental vars with prefix PIP_""" - for key, val in os.environ.items(): - if key.startswith("PIP_"): - name = key[4:].lower() - if name not in ENV_NAMES_IGNORED: - yield name, val - - # XXX: This is patched in the tests. - def iter_config_files(self) -> Iterable[Tuple[Kind, List[str]]]: - """Yields variant and configuration files associated with it. - - This should be treated like items of a dictionary. - """ - # SMELL: Move the conditions out of this function - - # environment variables have the lowest priority - config_file = os.environ.get("PIP_CONFIG_FILE", None) - if config_file is not None: - yield kinds.ENV, [config_file] - else: - yield kinds.ENV, [] - - config_files = get_configuration_files() - - # at the base we have any global configuration - yield kinds.GLOBAL, config_files[kinds.GLOBAL] - - # per-user configuration next - should_load_user_config = not self.isolated and not ( - config_file and os.path.exists(config_file) - ) - if should_load_user_config: - # The legacy config file is overridden by the new config file - yield kinds.USER, config_files[kinds.USER] - - # finally virtualenv configuration first trumping others - yield kinds.SITE, config_files[kinds.SITE] - - def get_values_in_config(self, variant: Kind) -> Dict[str, Any]: - """Get values present in a config file""" - return self._config[variant] - - def _get_parser_to_modify(self) -> Tuple[str, RawConfigParser]: - # Determine which parser to modify - assert self.load_only - parsers = self._parsers[self.load_only] - if not parsers: - # This should not happen if everything works correctly. - raise ConfigurationError( - "Fatal Internal error [id=2]. Please report as a bug." - ) - - # Use the highest priority parser. - return parsers[-1] - - # XXX: This is patched in the tests. 
- def _mark_as_modified(self, fname: str, parser: RawConfigParser) -> None: - file_parser_tuple = (fname, parser) - if file_parser_tuple not in self._modified_parsers: - self._modified_parsers.append(file_parser_tuple) - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self._dictionary!r})" diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/index.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/index.py deleted file mode 100644 index b94c32511f0cda2363bfc4f29c9c8bfcc7101f9b..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/index.py +++ /dev/null @@ -1,28 +0,0 @@ -import urllib.parse - - -class PackageIndex: - """Represents a Package Index and provides easier access to endpoints""" - - __slots__ = ["url", "netloc", "simple_url", "pypi_url", "file_storage_domain"] - - def __init__(self, url: str, file_storage_domain: str) -> None: - super().__init__() - self.url = url - self.netloc = urllib.parse.urlsplit(url).netloc - self.simple_url = self._url_for_path("simple") - self.pypi_url = self._url_for_path("pypi") - - # This is part of a temporary hack used to block installs of PyPI - # packages which depend on external urls only necessary until PyPI can - # block such packages themselves - self.file_storage_domain = file_storage_domain - - def _url_for_path(self, path: str) -> str: - return urllib.parse.urljoin(self.url, path) - - -PyPI = PackageIndex("https://pypi.org/", file_storage_domain="files.pythonhosted.org") -TestPyPI = PackageIndex( - "https://test.pypi.org/", file_storage_domain="test-files.pythonhosted.org" -) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/engine/__init__.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/engine/__init__.py deleted file mode 100644 index 08a61572b4c7d09c8d400e903a96cbf5b2cc4763..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/engine/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from .launch import * -from .train_loop import * - -__all__ = [k for k in globals().keys() if not k.startswith("_")] - - -# prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) -# but still make them available here -from .hooks import * -from .defaults import * diff --git a/spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_new.py b/spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_new.py deleted file mode 100644 index bfaf72e48b31cc1130f2892b0973c9aa06f195a3..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_new.py +++ /dev/null @@ -1,132 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F -from . 
import layers_new - - -class BaseNet(nn.Module): - def __init__( - self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6)) - ): - super(BaseNet, self).__init__() - self.enc1 = layers_new.Conv2DBNActiv(nin, nout, 3, 1, 1) - self.enc2 = layers_new.Encoder(nout, nout * 2, 3, 2, 1) - self.enc3 = layers_new.Encoder(nout * 2, nout * 4, 3, 2, 1) - self.enc4 = layers_new.Encoder(nout * 4, nout * 6, 3, 2, 1) - self.enc5 = layers_new.Encoder(nout * 6, nout * 8, 3, 2, 1) - - self.aspp = layers_new.ASPPModule(nout * 8, nout * 8, dilations, dropout=True) - - self.dec4 = layers_new.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1) - self.dec3 = layers_new.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1) - self.dec2 = layers_new.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1) - self.lstm_dec2 = layers_new.LSTMModule(nout * 2, nin_lstm, nout_lstm) - self.dec1 = layers_new.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1) - - def __call__(self, x): - e1 = self.enc1(x) - e2 = self.enc2(e1) - e3 = self.enc3(e2) - e4 = self.enc4(e3) - e5 = self.enc5(e4) - - h = self.aspp(e5) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = torch.cat([h, self.lstm_dec2(h)], dim=1) - h = self.dec1(h, e1) - - return h - - -class CascadedNet(nn.Module): - def __init__(self, n_fft, nout=32, nout_lstm=128): - super(CascadedNet, self).__init__() - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - self.nin_lstm = self.max_bin // 2 - self.offset = 64 - - self.stg1_low_band_net = nn.Sequential( - BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm), - layers_new.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0), - ) - - self.stg1_high_band_net = BaseNet( - 2, nout // 4, self.nin_lstm // 2, nout_lstm // 2 - ) - - self.stg2_low_band_net = nn.Sequential( - BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm), - layers_new.Conv2DBNActiv(nout, nout // 2, 1, 1, 0), - ) - self.stg2_high_band_net = BaseNet( - nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2 - ) - - self.stg3_full_band_net = BaseNet( - 3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm - ) - - self.out = nn.Conv2d(nout, 2, 1, bias=False) - self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False) - - def forward(self, x): - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - l1_in = x[:, :, :bandw] - h1_in = x[:, :, bandw:] - l1 = self.stg1_low_band_net(l1_in) - h1 = self.stg1_high_band_net(h1_in) - aux1 = torch.cat([l1, h1], dim=2) - - l2_in = torch.cat([l1_in, l1], dim=1) - h2_in = torch.cat([h1_in, h1], dim=1) - l2 = self.stg2_low_band_net(l2_in) - h2 = self.stg2_high_band_net(h2_in) - aux2 = torch.cat([l2, h2], dim=2) - - f3_in = torch.cat([x, aux1, aux2], dim=1) - f3 = self.stg3_full_band_net(f3_in) - - mask = torch.sigmoid(self.out(f3)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux = torch.cat([aux1, aux2], dim=1) - aux = torch.sigmoid(self.aux_out(aux)) - aux = F.pad( - input=aux, - pad=(0, 0, 0, self.output_bin - aux.size()[2]), - mode="replicate", - ) - return mask, aux - else: - return mask - - def predict_mask(self, x): - mask = self.forward(x) - - if self.offset > 0: - mask = mask[:, :, :, self.offset : -self.offset] - assert mask.size()[3] > 0 - - return mask - - def predict(self, x, aggressiveness=None): - mask = self.forward(x) - pred_mag = x * mask - - if self.offset > 0: - pred_mag = pred_mag[:, :, :, self.offset : -self.offset] - assert pred_mag.size()[3] > 0 - - return pred_mag diff --git 
a/spaces/Benson/text-generation/Examples/Apps Juegos Gratis Descargar Solitario.md b/spaces/Benson/text-generation/Examples/Apps Juegos Gratis Descargar Solitario.md deleted file mode 100644 index c1627699c7e146288248eb00e69ff822a61b7aed..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Apps Juegos Gratis Descargar Solitario.md +++ /dev/null @@ -1,76 +0,0 @@ -
      -

Free Game Apps to Download Solitaire: A Guide to the Classic Card Game

      -

Solitaire is one of the most popular and enduring card games in the world. It can be played by anyone, anywhere, at any time. Whether you want to relax, challenge your brain, or simply have fun, solitaire is the perfect game for you. But how can you play solitaire on your device? What are the best solitaire apps to download for free? And what are some tips and tricks to improve your solitaire skills? In this article, we answer these questions and more. Read on to learn everything you need to know about free solitaire game apps.

      -

free game apps download solitaire


      Download Zip ✫✫✫ https://bltlly.com/2v6Kfa



      -

What is solitaire and how do you play it?

      -

Solitaire, also known as patience or cabale, is a family of card games played by one person. The goal of solitaire is to arrange the cards in a given order, usually by suit and rank, or to pair them up and discard them. There are hundreds of different solitaire games, each with its own rules and variations. However, some common elements that most solitaire games share are listed below (a small code sketch of this game state follows the list):

      -
-
• A standard deck of 52 cards
• -
• A tableau, or layout of cards on a table or screen
• -
• A stock of cards that are not on the tableau
• -
• A waste pile of cards that have been played from the stock
• -
• Foundation (destination) piles of cards that are arranged in order
• -
-
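These shared elements map naturally onto a small game-state object. The sketch below is purely illustrative; the field names (`tableau`, `stock`, `waste`, `foundations`) are our own labels for the piles described above, not the API of any particular solitaire app.

```python
from dataclasses import dataclass, field

# Illustrative encoding: rank 1 = ace ... 13 = king, suit is one of "SHDC".
Card = tuple[int, str]  # requires Python 3.9+

@dataclass
class SolitaireState:
    tableau: list = field(default_factory=list)      # layout of cards dealt on the table/screen
    stock: list = field(default_factory=list)        # cards not yet on the tableau
    waste: list = field(default_factory=list)        # cards flipped over from the stock
    foundations: list = field(default_factory=list)  # goal piles, built up in order
```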

To play solitaire, you follow these basic steps (a minimal code sketch of this loop appears after the list):

      -
-
1. Shuffle the deck and deal some cards face up onto the tableau, according to the rules of the game.
-
2. Turn over the top card of the stock and place it on the waste pile.
-
3. Move cards from the tableau or the waste pile to the foundation piles, following the rules of the game.
-
4. If you run out of moves, flip another card from the stock and place it on the waste pile.
-
5. Repeat steps 3 and 4 until you win or lose the game.
-
- -
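One way to make this loop concrete is the minimal Python sketch below, under stated assumptions: `try_moves` is a hypothetical callback that encodes the rules of a specific variant (it moves whatever cards it legally can to the foundations and returns True if it moved anything), and a game counts as won when all 52 cards reach the foundations.

```python
import random

def new_deck():
    """Step 1 helper: a shuffled standard 52-card deck (rank 1-13, four suits)."""
    deck = [(rank, suit) for rank in range(1, 14) for suit in "SHDC"]
    random.shuffle(deck)
    return deck

def play(deal_size, try_moves):
    """Generic solitaire loop for steps 1-5; variant-specific rules live in `try_moves`."""
    deck = new_deck()
    tableau = [deck.pop() for _ in range(deal_size)]   # step 1: deal onto the tableau
    stock, waste, foundations = deck, [], []
    while True:
        if try_moves(tableau, waste, foundations):     # step 3: play any legal moves
            continue
        if not stock:                                  # out of moves and out of stock
            return len(foundations) == 52              # won iff every card was played
        waste.append(stock.pop())                      # steps 2/4: flip a card to the waste
```

A Klondike implementation, for example, would supply a `try_moves` that enforces descending rank and alternating colors on the tableau and ascending same-suit order on the foundations.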

The exact origins of solitaire are unclear, but the earliest records appear in the late 1700s in northern Europe and Scandinavia. The term Patiencespiel appears in a German book published in 1788[1], and books also appeared in Sweden and Russia in the early 1800s. Some sources suggest that solitaire was originally a form of fortune-telling or divination, as it was often associated with cartomancy (the use of cards for prediction) and cabale (secret knowledge).[2]

      -

The first collections of solitaire games in English appeared in the 1860s, many of them translations from French or German. Charles Dickens mentioned Magwitch playing "a complicated kind of Patience" with a ragged pack of cards in Great Expectations (1861), and Queen Victoria's German husband, Albert, was a keen player.[3] By the early twentieth century the name "solitaire" was established in North America, where it remains more popular than "patience". The most famous solitaire game is Klondike, which was also called Microsoft Solitaire after it was included in the Windows operating system starting in 1990.[4]

      -

      -

The rules and variations of solitaire

      -

As mentioned above, there are hundreds of different solitaire games, each with its own rules and variations. Some of the most popular are:

      - -
Name | Description
Klondike | The classic solitaire game that most people are familiar with. The goal is to build four foundation piles of cards in ascending order by suit, starting from the aces. The tableau consists of seven columns of cards, with the top card face up and the rest face down. You can move cards from the tableau to the foundations, or between the columns, as long as they are in descending order and alternating colors. You can also draw one or three cards from the stock onto the waste pile, and move them to the tableau or the foundations.
FreeCell | A solitaire game that requires more strategy than luck. The goal is to build four foundation piles of cards in ascending order by suit, starting from the aces. The tableau consists of eight columns of cards, all face up. You can move cards from the tableau to the foundations, or between the columns, as long as they are in descending order and alternating colors. You can also use four free cells to temporarily store one card each, which can help you free up moves.
Pyramid | A solitaire game that uses a pyramid-shaped layout of cards. The goal is to remove all the cards from the pyramid by pairing them up and discarding them. You can only pair cards that are exposed, meaning they have no other cards on top of them. You can also pair a card with the top card of the waste pile or the stock. Pairs must add up to 13, with aces counting as 1 and kings counting as 13.
Golf | A solitaire game that uses a golf-themed layout of cards. The goal is to move all the cards from the tableau to the waste pile, discarding them one at a time. You can only discard a card that is one rank higher or lower than the top card of the waste pile, regardless of suit. You can also deal a card from the stock onto the waste pile when you run out of moves.
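The Pyramid pairing rule in the table is easy to state in code. Below is a tiny, hypothetical helper, assuming ranks are encoded ace = 1 through king = 13; in standard Pyramid a king already counts as 13 on its own, so it is discarded alone.

```python
from typing import Optional

KING = 13

def can_remove(rank_a: int, rank_b: Optional[int] = None) -> bool:
    """Pyramid rule: a pair of exposed cards is removable when the ranks
    sum to 13; a lone king (worth 13 by itself) is removed on its own."""
    if rank_b is None:
        return rank_a == KING
    return rank_a + rank_b == 13

assert can_remove(6, 7)        # 6 + 7 == 13 -> removable pair
assert can_remove(KING)        # a king goes out by itself
assert not can_remove(10, 4)   # 14 != 13 -> stays on the pyramid
```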
Character | Ability
Eren | Can transform into a Titan for a limited time by pressing T. Can regenerate health and stamina as a Titan. Can punch and grab Titans as a Titan.
Mikasa | Can perform a powerful slash that deals extra damage by pressing T. Can boost speed and damage for a short time by pressing Q.
Levi | Can perform a spinning slash that deals extra damage and cuts several limbs by pressing T. Can boost speed and damage for a short time by pressing Q.
Armin | Can distract Titans by shouting "I am the commander!" by pressing T. Can boost the damage of nearby allies for a short time by pressing Q.
Jean | Can summon a horse to ride by pressing T. Gains speed and stamina while riding the horse. Can jump off the horse and use it as a decoy by pressing Q.
Marco | Can drop items such as blades, gas, or guns by pressing T. Can boost the item drop rate of nearby allies for a short time by pressing Q.
Petra | Can save gas, using less of it while swinging or boosting, by pressing T. Can boost the gas efficiency of nearby allies for a short time by pressing Q.
Sasha | Can eat a piece of meat to restore health and stamina by pressing T. Can boost the health and stamina regeneration of nearby allies for a short time by pressing Q.
Annie | Can use a ring blade with longer range and higher damage than normal blades by pressing T. Can transform into the Female Titan for a limited time by pressing Q. Can harden her skin and crystallize her nape as the Female Titan.
Erwin | Can fire a flare gun that marks a Titan's location by pressing T. Can boost the damage and speed of all allies on the map for a short time by pressing Q.
Map | Features
Trost District | The first map in the game, based on the first arc of the anime series. It has a large wall, several buildings, and a gate that can be destroyed by the Colossal Titan. It also has cannons that can be used to shoot at Titans.
Forest of Giant Trees | The second map in the game, based on the second arc of the anime series. It has a dense forest with huge trees that can be used for swinging and hiding. It also has a clearing where the Female Titan appears.
Outside Wall Maria
City (Night) | The fourth map in the game, based on the fourth arc of the anime series. It has a dark city with street lights, buildings, and bridges. It also has a river that can be used to escape from or drown Titans.
Colossal Titan | The fifth map in the game, based on the fifth arc of the anime series. It has a wall that is under attack by the Colossal Titan and other Titans. It also has a train that can be used to transport soldiers and supplies.
Mode | Features
Normal Mode | The default mode of the game, where you have to kill all the Titans on the map or survive for a set time. You can choose between different sub-modes such as Single Player, Multiplayer, No Respawn, No Punk, No Crawler, No Abnormal, No Female, No Colossal, No Armored.
Hard Mode | A more challenging mode of the game, where you have to kill all the Titans on the map or survive for a set time. The Titans are faster, stronger, smarter, and more varied. You can choose between different sub-modes such as Single Player, Multiplayer, No Respawn, Punk Only, Crawler Only, Abnormal Only, Female Only, Colossal Only, Armored Only.
Capture Mode | A mode of the game where you have to capture live Titans using special nets. You can choose between different sub-modes such as Single Player, Multiplayer, Capture All, Capture One, Capture Annie, Capture Colossal, Capture Armored.
Racing Mode
Boss Mode | A mode of the game where you have to fight a giant boss Titan that has special abilities and attacks. You can choose between different sub-modes such as Single Player, Multiplayer, Female Titan, Colossal Titan, Armored Titan.
PvP Mode | A mode of the game where you have to fight other players using your 3D maneuver gear and weapons. You can choose between different sub-modes such as Single Player, Multiplayer, Team Deathmatch, Capture the Flag, King of the Hill.
Wave Mode | A mode of the game where you have to survive waves of Titans that become progressively harder and more numerous. You can choose between different sub-modes such as Single Player, Multiplayer, Endless Mode, Survival Mode, Titan Mode.
Akina Downhill Mode | A mode of the game where you have to race down a steep hill using your 3D maneuver gear while avoiding obstacles and Titans. You can choose between different sub-modes such as Single Player, Multiplayer, Time Trial, Free Run.
Custom Map Mode | A mode of the game where you can play on custom maps created by yourself or other players using the map editor. You can choose between different sub-modes such as Single Player, Multiplayer, Normal Mode, Hard Mode, Capture Mode, Racing Mode, Boss Mode, PvP Mode, Wave Mode.
    - - - -
    -

    -
    -

            Data Provider Name

    -

    -

    - - - - -
    -

    -

    -

    -
    -

            Data Address

    -

    -

    - - - - -
    -

    -

    -

    -
    -

Data Provider Contact Information

    -

    -

    - - - - -
    -

    -
    -

            Data Provider Name

    -

    -

    - - - - -
    -

    -
    -

Special Conditions (if applicable)

    -

    -

    - - - - -
    -

    -
    -

            Data Management Plan

    -

    -

    -
      -
    1. Datasets
    2. -
    -

    - - - - -
    -

    -

    -

    -

    -

    -

    -

    -
    -

            List of Datasets

    -

    -

    - - - - -
    -

    -

    -

    -
    -

License of Datasets (if more than one, please assign in List of Datasets)

    -

    -

    - - - - -
    -

    -

    -

    -
    -

Restrictions - Please indicate if any restrictions apply to any of the above listed datasets

    -

    -

    -
      -
    1. Field of Use
    2. -
    -

    -

    Scope / use cases:

    -

    -

▯ under condition: openly released models, results, and artifacts

-

▯ under condition: use RAIL license for ML artifacts (has to be attached)

-

▯ under condition: value alignment (determined by data host)

-

▯ under condition: value alignment (data modelers sign click-through form)

    -

    -
      -
    1. Data Distribution Policy
    2. -
    -

    -

Acknowledging the immense value and benefits that your datasets may provide, and being conscious of and respectful towards the different economic interests that you may have, this Agreement offers the Data Provider a flexible set of optional frameworks for the use, re-use, and distribution of data:

    -

    -

The Data Provider permits the Data Host to use the Data for the purpose set out in this Agreement. The Data Host is not allowed to make the Data publicly available outside of the remits of this license (this does not include Meta Data).

    -

    -

▯ The Data Provider permits the Data Host to make the Data (as a whole, in parts, or processed) available to downstream users upon signing a non-dissemination agreement.

-

-

▯ The Data Provider permits the Data Host to make the Data (as a whole, in parts, or processed) available to downstream users using a system that supports authentication/synchronization.

-

-

▯ The Data Provider permits the Data Host to make the Data (as a whole, in parts, or processed) available with modifications such as anonymizing personal and/or sensitive information about individuals.

    -

    -

The Data Provider permits the Data Host to use the Data for the purpose set out in this Agreement. Additionally, the Data Host is allowed to make the Data publicly available under the Data license (select one) provided by the Data Provider.

    -

    -

    ▯ CC BY 4.0 (Link)

    -

    ▯ CC BY-NC-ND 4.0 (Link)

    -

    ▯ CC BY-NC-SA 3.0 (Link)

    -

    ▯ CC BY-NC-SA 4.0 (Link)

    -

    ▯ CC BY-SA 3.0 (Link)

    -

    ▯ CC BY-SA 4.0 (Link)

    -

    ▯ CC-BY-NC 4.0 (Link)

    -

    ▯ Microsoft Research Data License Agreement (Link)

    -

▯ custom license agreement (see Attachment if applicable)

    -

    Linux Foundation CDLA Permissive

    -

    Linux Foundation CDLA Restrictive

    -

    -

    -
    -

    -

    RAIL Model License (EXHIBIT B)

    -

    -

    -

    Find here: BLOOM RAIL License v1.0 

    -

    -

    -

    -

    -

    -

    -

    -
    -

    -

    Potential further clauses:

    -

    -

    X. CONFLICT RESOLUTION

    -

In the case of any dispute, the parties shall attempt to resolve the issue by negotiation first.

-

In case such negotiations cannot resolve the issue within six months, either party may bring the issue to the applicable court of law.

    -

    -

    X. NO WAIVER

    -

The failure of the Data Host or Data Provider to enforce or execute any right or provision of this Agreement shall not constitute a waiver of that right or provision.

    -

    -

X. TITLES

    -

Headings and Section titles in this Agreement are only for convenience and are not to be considered in construing this Agreement.

    -
    -

    -
    - - - \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/CONCOURS a venir les meilleures coles dingnieurs intgrer en 2023.md b/spaces/bioriAsaeru/text-to-voice/CONCOURS a venir les meilleures coles dingnieurs intgrer en 2023.md deleted file mode 100644 index 4fce0ecc198cdb809b64362297efef9e5480e424..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/CONCOURS a venir les meilleures coles dingnieurs intgrer en 2023.md +++ /dev/null @@ -1,14 +0,0 @@ -
    -

Candidates for the special external competitive exam, known as the "Talents" exam, must have the certificate of completion of a training cycle (Prépa Talents) signed by their training institution.

    -

UPCOMING COMPETITIVE EXAMS


    DOWNLOAD ··· https://urloso.com/2uyRPg



    -

The RAEP files (for internal and third-exam candidates) and the CVs (for external candidates) must be sent in 5 copies to the CNG (registered letter with acknowledgment of receipt) no later than Monday 19 September 2022.

    -

For any information, you can contact us by email:
[casvp-did-srh-concours then paris.fr after the @ sign]pnfic-qvq-feu-pbapbhef@cnevf.se[casvp-did-srh-concours then paris.fr after the @ sign]

    -

By enquiring with the Human Resources Department - Competitive Exams Section, Office 6414 - 5, boulevard Diderot, 75589 PARIS cedex 12 -
Email: [casvp-did-srh-concours then paris.fr after the @ sign]pnfic-qvq-feu-pbapbhef@cnevf.se[casvp-did-srh-concours then paris.fr after the @ sign]

    -

If you pass the competitive exam or the interview, this visa will allow you to apply for a student residence permit directly at the prefecture. You will not be required to return to your country to apply for a long-stay visa.

    -

    -

To be able to apply for this permit directly at the prefecture, you must apply for a short-stay "student competitive exam" visa, even if you are exempt from the Schengen short-stay visa.

    -

In this anxiety-inducing period for young people (and everyone else!) caused by the climate crisis, the collapse of biodiversity, and the geopolitical situation, such a competition is particularly important. Understanding the issues, wanting to address them, developing solutions: TAKING ACTION is THE answer for everyone.

    -

[Embedded agenda widget from guide-bordeaux-gironde.com (cookie-consent script, menus, map configuration): a listing of upcoming contest events in Gironde, events 1 to 14 of 85, e.g. a belote card-game contest on 10/02/2023.]

    -

[Embedded agenda widget from guide-tarn-aveyron.com (cookie-consent script, menus, map configuration): a listing of upcoming contest events in the Tarn, Aveyron, and Tarn-et-Garonne, events 1 to 14 of 42, e.g. a games evening at Vabre-Tizac on 09/02/2023.]

    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/GabrielKnightSinsoftheFathers20thAnniversaryEdition_HOT_ Full.md b/spaces/bioriAsaeru/text-to-voice/GabrielKnightSinsoftheFathers20thAnniversaryEdition_HOT_ Full.md deleted file mode 100644 index 8d2a3450190de1cf95f8887d25b7c338685c8300..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/GabrielKnightSinsoftheFathers20thAnniversaryEdition_HOT_ Full.md +++ /dev/null @@ -1,70 +0,0 @@ -

    GabrielKnightSinsoftheFathers20thAnniversaryEditionFULL


    Download File ····· https://urloso.com/2uyOXo



    - -The Phoenix Release 0.9.6 (preview release) has been released today. - -This release has some new features. We hope you will like them. - -If you like the work that is done in the Phoenix Framework, you can become a patron of the project. - -Also, If you want to know more about the project, please go to www.phoenixframework.org. - -Changes in Phoenix 0.9.6 - -====================== - -**Only these changes are included in this release.** - -* Moved the guide from README.md to README.txt - -* Changed the root url from / to /_ - -* Added configuration to locate assets. Only the static dir is defined as './assets/static'. [#1215] - -* Moved the static file listing to `views/static_files` [#1175] - -* Updated README.md with more information. - -* Updated doc with more information. - -* If no content exists, the layout will be 'application'. [#1146] - -* Update the build to use up to date versions of Elixir v1.1.0. - -* Updated the COUCHDB_URL configuration. - -* Added --log_level to command line options. - -* Added --quiet to command line options. - -* Added --timeout to command line options. - -* Added doc to available options for `Application.ensure_all_started/2` [#1201] - -* Added --trace_level to command line options. - -* Added --with to command line options. - -* Added --gen_server_preloader to command line options. - -* Refactored the `Phoenix.Endpoint` [#1193] - -* Added "precaching" to web workers [#1193] - -* Added an example to web workers [#1193] - -* Added configurable key ring for passing keys to tasks [#1193] - -* Added `Phoenix.Http` `handle_cast/2` function to manage tasks [#1193] - -* Added task scheduler to `Phoenix.Http` - -* Added custom scheduler to `Phoenix.Http` - -* Added `Phoenix.Http` fun if message and task keys are available for the given `name/0` - -* Added `Phoenix.Http.Helpers.get_body/2` function to fetch body for an incoming message - -* 4fefd39f24
    -
    -
    -

    diff --git a/spaces/bioriAsaeru/text-to-voice/Gerix Wifi Cracker Android Apk Editeur Acdsee Obese PORTABLE.md b/spaces/bioriAsaeru/text-to-voice/Gerix Wifi Cracker Android Apk Editeur Acdsee Obese PORTABLE.md deleted file mode 100644 index c7a936cf0ac06f1f3f440b2e831c9cc74c0a5cf6..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Gerix Wifi Cracker Android Apk Editeur Acdsee Obese PORTABLE.md +++ /dev/null @@ -1,26 +0,0 @@ -
    -

    How to Use Gerix Wifi Cracker on Android with ACDSee Photo Editor

    -

    Gerix Wifi Cracker is a program that allows you to hack into wifi networks using a graphical user interface. It is based on the Backtrack Linux distribution and uses tools like Aircrack-ng, Airodump-ng, MDK3, and Reaver. Gerix Wifi Cracker can run on Android devices with root access and a custom kernel that supports wireless injection. In this article, we will show you how to install and use Gerix Wifi Cracker on your Android device and how to edit the captured wifi packets with ACDSee Photo Editor.

    -

    Step 1: Install Gerix Wifi Cracker on Android

    -

    To install Gerix Wifi Cracker on your Android device, you will need to download the apk file from SourceForge[^2^] and install it manually. You will also need to install BusyBox and Terminal Emulator from the Google Play Store. Make sure you have root access and a custom kernel that supports wireless injection on your device. You can check if your device is compatible by running the following command in Terminal Emulator:

    -

    Gerix Wifi Cracker Android Apk editeur acdsee obese


    Download Filehttps://urloso.com/2uyOjD



    -iw list | grep -i monitor -

    If you see something like "Supported interface modes: * IBSS * managed * AP * AP/VLAN * monitor", then your device is compatible. If not, you will need to find a custom kernel that supports wireless injection for your device model.

    -

    Step 2: Launch Gerix Wifi Cracker on Android

    -

    To launch Gerix Wifi Cracker on your Android device, open Terminal Emulator and type the following command:

    -su -

    This will grant root access to Terminal Emulator. Then type the following command:

    -cd /data/data/com.droid.developer.wifipassword/files/gerix-wifi-cracker -

    This will change the directory to where Gerix Wifi Cracker is installed. Then type the following command:

    -python gerix.py -

    This will launch Gerix Wifi Cracker with a graphical user interface. You should see something like this:

    -

-[Screenshot: the Gerix Wifi Cracker GUI] -

    Step 3: Scan for wifi networks with Gerix Wifi Cracker on Android

    -

    To scan for wifi networks with Gerix Wifi Cracker on your Android device, click on the "Scan" tab and then click on the "Start Scan" button. This will start scanning for nearby wifi networks and display them in a list. You can see the network name (ESSID), encryption type (WEP, WPA, WPA2), channel number, signal strength (PWR), MAC address (BSSID), and number of clients (STATION) for each network. You can also filter the networks by encryption type or channel number using the drop-down menus at the top.

    -

    Step 4: Hack into wifi networks with Gerix Wifi Cracker on Android

    -

    To hack into wifi networks with Gerix Wifi Cracker on your Android device, select a network from the list and click on the "Attack" tab. Depending on the encryption type of the network, you can choose different attack methods. For example, if the network is WEP-encrypted, you can use ARP request replay or chop-chop attack to capture enough data packets to crack the WEP key. If the network is WPA-encrypted, you can use deauthentication attack or WPS attack to capture the handshake or PIN code of the network. For more details on how to use each attack method, you can refer to the official documentation of Gerix Wifi Cracker[^2^]. Once you have captured enough data packets or handshake or PIN code of the network, you can use Aircrack-ng or Reaver to crack the password of the network.

    -

    Step 5: Edit wifi packets with ACDSee Photo Editor

    -

To edit wifi packets with ACDSee Photo Editor, you will need to download and install ACDSee Photo Editor from Uptodown[^1^]. ACDSee Photo Editor is powerful photo-editing software that can also edit raw data files such as wifi packets. To edit

    -
    -
    \ No newline at end of file diff --git a/spaces/bla/tranny/App/Settings.py b/spaces/bla/tranny/App/Settings.py deleted file mode 100644 index f04b23bfa38defa76870edabff51c6de15c7c18e..0000000000000000000000000000000000000000 --- a/spaces/bla/tranny/App/Settings.py +++ /dev/null @@ -1,3 +0,0 @@ -class Settings: - ALGORITHM = "HS256" - HASH = "86c5ceb27e1bf441130299c0209e5f35b88089f62c06b2b09d65772274f12057" diff --git a/spaces/bookbot/Grad-TTS-Weildan-Playground/Grad-TTS/model/monotonic_align/setup.py b/spaces/bookbot/Grad-TTS-Weildan-Playground/Grad-TTS/model/monotonic_align/setup.py deleted file mode 100644 index 6242b251d1eef46d13503a8d71f6efdd29dd8bcf..0000000000000000000000000000000000000000 --- a/spaces/bookbot/Grad-TTS-Weildan-Playground/Grad-TTS/model/monotonic_align/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -""" from https://github.com/jaywalnut310/glow-tts """ - -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/evaluation/fast_eval_api.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/evaluation/fast_eval_api.py deleted file mode 100644 index 2eb202bd5efa3ec3d366027b1debffc269ae8b17..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/evaluation/fast_eval_api.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import logging -import numpy as np -import time -from pycocotools.cocoeval import COCOeval - -from detectron2 import _C - -logger = logging.getLogger(__name__) - - -class COCOeval_opt(COCOeval): - """ - This is a slightly modified version of the original COCO API, where the functions evaluateImg() - and accumulate() are implemented in C++ to speedup evaluation - """ - - def evaluate(self): - """ - Run per image evaluation on given images and store results in self.evalImgs_cpp, a - datastructure that isn't readable from Python but is used by a c++ implementation of - accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure - self.evalImgs because this datastructure is a computational bottleneck. 
- :return: None - """ - tic = time.time() - - p = self.params - # add backward compatibility if useSegm is specified in params - if p.useSegm is not None: - p.iouType = "segm" if p.useSegm == 1 else "bbox" - logger.info("Evaluate annotation type *{}*".format(p.iouType)) - p.imgIds = list(np.unique(p.imgIds)) - if p.useCats: - p.catIds = list(np.unique(p.catIds)) - p.maxDets = sorted(p.maxDets) - self.params = p - - self._prepare() # bottleneck - - # loop through images, area range, max detection number - catIds = p.catIds if p.useCats else [-1] - - if p.iouType == "segm" or p.iouType == "bbox": - computeIoU = self.computeIoU - elif p.iouType == "keypoints": - computeIoU = self.computeOks - self.ious = { - (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds - } # bottleneck - - maxDet = p.maxDets[-1] - - # <<<< Beginning of code differences with original COCO API - def convert_instances_to_cpp(instances, is_det=False): - # Convert annotations for a list of instances in an image to a format that's fast - # to access in C++ - instances_cpp = [] - for instance in instances: - instance_cpp = _C.InstanceAnnotation( - int(instance["id"]), - instance["score"] if is_det else instance.get("score", 0.0), - instance["area"], - bool(instance.get("iscrowd", 0)), - bool(instance.get("ignore", 0)), - ) - instances_cpp.append(instance_cpp) - return instances_cpp - - # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++ - ground_truth_instances = [ - [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds] - for imgId in p.imgIds - ] - detected_instances = [ - [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds] - for imgId in p.imgIds - ] - ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds] - - if not p.useCats: - # For each image, flatten per-category lists into a single list - ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances] - detected_instances = [[[o for c in i for o in c]] for i in detected_instances] - - # Call C++ implementation of self.evaluateImgs() - self._evalImgs_cpp = _C.COCOevalEvaluateImages( - p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances - ) - self._evalImgs = None - - self._paramsEval = copy.deepcopy(self.params) - toc = time.time() - logger.info("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic)) - # >>>> End of code differences with original COCO API - - def accumulate(self): - """ - Accumulate per image evaluation results and store the result in self.eval. Does not - support changing parameter settings from those used by self.evaluate() - """ - logger.info("Accumulating evaluation results...") - tic = time.time() - assert hasattr( - self, "_evalImgs_cpp" - ), "evaluate() must be called before accmulate() is called." 
- - self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp) - - # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections - self.eval["recall"] = np.array(self.eval["recall"]).reshape( - self.eval["counts"][:1] + self.eval["counts"][2:] - ) - - # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X - # num_area_ranges X num_max_detections - self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"]) - self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"]) - toc = time.time() - logger.info("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic)) diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/tests/test_setup.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/tests/test_setup.py deleted file mode 100644 index 165a1b9a7b64aa8a0fbe5b862ebfb6594e77c256..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/tests/test_setup.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import unittest - -from .common import ( - get_config_files, - get_evolution_config_files, - get_hrnet_config_files, - get_quick_schedules_config_files, - setup, -) - - -class TestSetup(unittest.TestCase): - def _test_setup(self, config_file): - setup(config_file) - - def test_setup_configs(self): - config_files = get_config_files() - for config_file in config_files: - self._test_setup(config_file) - - def test_setup_evolution_configs(self): - config_files = get_evolution_config_files() - for config_file in config_files: - self._test_setup(config_file) - - def test_setup_hrnet_configs(self): - config_files = get_hrnet_config_files() - for config_file in config_files: - self._test_setup(config_file) - - def test_setup_quick_schedules_configs(self): - config_files = get_quick_schedules_config_files() - for config_file in config_files: - self._test_setup(config_file) diff --git a/spaces/brjathu/HMR2.0/vendor/pyrender/pyrender/primitive.py b/spaces/brjathu/HMR2.0/vendor/pyrender/pyrender/primitive.py deleted file mode 100644 index 7f83f46f532b126a4573e715dd03d079fef755ca..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/pyrender/pyrender/primitive.py +++ /dev/null @@ -1,489 +0,0 @@ -"""Primitives, conforming to the glTF 2.0 standards as specified in -https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-primitive - -Author: Matthew Matl -""" -import numpy as np - -from OpenGL.GL import * - -from .material import Material, MetallicRoughnessMaterial -from .constants import FLOAT_SZ, UINT_SZ, BufFlags, GLTF -from .utils import format_color_array - - -class Primitive(object): - """A primitive object which can be rendered. - - Parameters - ---------- - positions : (n, 3) float - XYZ vertex positions. - normals : (n, 3) float - Normalized XYZ vertex normals. - tangents : (n, 4) float - XYZW vertex tangents where the w component is a sign value - (either +1 or -1) indicating the handedness of the tangent basis. - texcoord_0 : (n, 2) float - The first set of UV texture coordinates. - texcoord_1 : (n, 2) float - The second set of UV texture coordinates. - color_0 : (n, 4) float - RGBA vertex colors. - joints_0 : (n, 4) float - Joint information. - weights_0 : (n, 4) float - Weight information for morphing. - indices : (m, 3) int - Face indices for triangle meshes or fans. 
- material : :class:`Material` - The material to apply to this primitive when rendering. - mode : int - The type of primitives to render, one of the following: - - - ``0``: POINTS - - ``1``: LINES - - ``2``: LINE_LOOP - - ``3``: LINE_STRIP - - ``4``: TRIANGLES - - ``5``: TRIANGLES_STRIP - - ``6``: TRIANGLES_FAN - targets : (k,) int - Morph target indices. - poses : (x,4,4), float - Array of 4x4 transformation matrices for instancing this object. - """ - - def __init__(self, - positions, - normals=None, - tangents=None, - texcoord_0=None, - texcoord_1=None, - color_0=None, - joints_0=None, - weights_0=None, - indices=None, - material=None, - mode=None, - targets=None, - poses=None): - - if mode is None: - mode = GLTF.TRIANGLES - - self.positions = positions - self.normals = normals - self.tangents = tangents - self.texcoord_0 = texcoord_0 - self.texcoord_1 = texcoord_1 - self.color_0 = color_0 - self.joints_0 = joints_0 - self.weights_0 = weights_0 - self.indices = indices - self.material = material - self.mode = mode - self.targets = targets - self.poses = poses - - self._bounds = None - self._vaid = None - self._buffers = [] - self._is_transparent = None - self._buf_flags = None - - @property - def positions(self): - """(n,3) float : XYZ vertex positions. - """ - return self._positions - - @positions.setter - def positions(self, value): - value = np.asanyarray(value, dtype=np.float32) - self._positions = np.ascontiguousarray(value) - self._bounds = None - - @property - def normals(self): - """(n,3) float : Normalized XYZ vertex normals. - """ - return self._normals - - @normals.setter - def normals(self, value): - if value is not None: - value = np.asanyarray(value, dtype=np.float32) - value = np.ascontiguousarray(value) - if value.shape != self.positions.shape: - raise ValueError('Incorrect normals shape') - self._normals = value - - @property - def tangents(self): - """(n,4) float : XYZW vertex tangents. - """ - return self._tangents - - @tangents.setter - def tangents(self, value): - if value is not None: - value = np.asanyarray(value, dtype=np.float32) - value = np.ascontiguousarray(value) - if value.shape != (self.positions.shape[0], 4): - raise ValueError('Incorrect tangent shape') - self._tangents = value - - @property - def texcoord_0(self): - """(n,2) float : The first set of UV texture coordinates. - """ - return self._texcoord_0 - - @texcoord_0.setter - def texcoord_0(self, value): - if value is not None: - value = np.asanyarray(value, dtype=np.float32) - value = np.ascontiguousarray(value) - if (value.ndim != 2 or value.shape[0] != self.positions.shape[0] or - value.shape[1] < 2): - raise ValueError('Incorrect texture coordinate shape') - if value.shape[1] > 2: - value = value[:,:2] - self._texcoord_0 = value - - @property - def texcoord_1(self): - """(n,2) float : The second set of UV texture coordinates. - """ - return self._texcoord_1 - - @texcoord_1.setter - def texcoord_1(self, value): - if value is not None: - value = np.asanyarray(value, dtype=np.float32) - value = np.ascontiguousarray(value) - if (value.ndim != 2 or value.shape[0] != self.positions.shape[0] or - value.shape[1] != 2): - raise ValueError('Incorrect texture coordinate shape') - self._texcoord_1 = value - - @property - def color_0(self): - """(n,4) float : RGBA vertex colors. 
- """ - return self._color_0 - - @color_0.setter - def color_0(self, value): - if value is not None: - value = np.ascontiguousarray( - format_color_array(value, shape=(len(self.positions), 4)) - ) - self._is_transparent = None - self._color_0 = value - - @property - def joints_0(self): - """(n,4) float : Joint information. - """ - return self._joints_0 - - @joints_0.setter - def joints_0(self, value): - self._joints_0 = value - - @property - def weights_0(self): - """(n,4) float : Weight information for morphing. - """ - return self._weights_0 - - @weights_0.setter - def weights_0(self, value): - self._weights_0 = value - - @property - def indices(self): - """(m,3) int : Face indices for triangle meshes or fans. - """ - return self._indices - - @indices.setter - def indices(self, value): - if value is not None: - value = np.asanyarray(value, dtype=np.float32) - value = np.ascontiguousarray(value) - self._indices = value - - @property - def material(self): - """:class:`Material` : The material for this primitive. - """ - return self._material - - @material.setter - def material(self, value): - # Create default material - if value is None: - value = MetallicRoughnessMaterial() - else: - if not isinstance(value, Material): - raise TypeError('Object material must be of type Material') - self._material = value - - @property - def mode(self): - """int : The type of primitive to render. - """ - return self._mode - - @mode.setter - def mode(self, value): - value = int(value) - if value < GLTF.POINTS or value > GLTF.TRIANGLE_FAN: - raise ValueError('Invalid mode') - self._mode = value - - @property - def targets(self): - """(k,) int : Morph target indices. - """ - return self._targets - - @targets.setter - def targets(self, value): - self._targets = value - - @property - def poses(self): - """(x,4,4) float : Homogenous transforms for instancing this primitive. - """ - return self._poses - - @poses.setter - def poses(self, value): - if value is not None: - value = np.asanyarray(value, dtype=np.float32) - value = np.ascontiguousarray(value) - if value.ndim == 2: - value = value[np.newaxis,:,:] - if value.shape[1] != 4 or value.shape[2] != 4: - raise ValueError('Pose matrices must be of shape (n,4,4), ' - 'got {}'.format(value.shape)) - self._poses = value - self._bounds = None - - @property - def bounds(self): - if self._bounds is None: - self._bounds = self._compute_bounds() - return self._bounds - - @property - def centroid(self): - """(3,) float : The centroid of the primitive's AABB. - """ - return np.mean(self.bounds, axis=0) - - @property - def extents(self): - """(3,) float : The lengths of the axes of the primitive's AABB. - """ - return np.diff(self.bounds, axis=0).reshape(-1) - - @property - def scale(self): - """(3,) float : The length of the diagonal of the primitive's AABB. - """ - return np.linalg.norm(self.extents) - - @property - def buf_flags(self): - """int : The flags for the render buffer. - """ - if self._buf_flags is None: - self._buf_flags = self._compute_buf_flags() - return self._buf_flags - - def delete(self): - self._unbind() - self._remove_from_context() - - @property - def is_transparent(self): - """bool : If True, the mesh is partially-transparent. 
- """ - return self._compute_transparency() - - def _add_to_context(self): - if self._vaid is not None: - raise ValueError('Mesh is already bound to a context') - - # Generate and bind VAO - self._vaid = glGenVertexArrays(1) - glBindVertexArray(self._vaid) - - ####################################################################### - # Fill vertex buffer - ####################################################################### - - # Generate and bind vertex buffer - vertexbuffer = glGenBuffers(1) - self._buffers.append(vertexbuffer) - glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer) - - # positions - vertex_data = self.positions - attr_sizes = [3] - - # Normals - if self.normals is not None: - vertex_data = np.hstack((vertex_data, self.normals)) - attr_sizes.append(3) - - # Tangents - if self.tangents is not None: - vertex_data = np.hstack((vertex_data, self.tangents)) - attr_sizes.append(4) - - # Texture Coordinates - if self.texcoord_0 is not None: - vertex_data = np.hstack((vertex_data, self.texcoord_0)) - attr_sizes.append(2) - if self.texcoord_1 is not None: - vertex_data = np.hstack((vertex_data, self.texcoord_1)) - attr_sizes.append(2) - - # Color - if self.color_0 is not None: - vertex_data = np.hstack((vertex_data, self.color_0)) - attr_sizes.append(4) - - # TODO JOINTS AND WEIGHTS - # PASS - - # Copy data to buffer - vertex_data = np.ascontiguousarray( - vertex_data.flatten().astype(np.float32) - ) - glBufferData( - GL_ARRAY_BUFFER, FLOAT_SZ * len(vertex_data), - vertex_data, GL_STATIC_DRAW - ) - total_sz = sum(attr_sizes) - offset = 0 - for i, sz in enumerate(attr_sizes): - glVertexAttribPointer( - i, sz, GL_FLOAT, GL_FALSE, FLOAT_SZ * total_sz, - ctypes.c_void_p(FLOAT_SZ * offset) - ) - glEnableVertexAttribArray(i) - offset += sz - - ####################################################################### - # Fill model matrix buffer - ####################################################################### - - if self.poses is not None: - pose_data = np.ascontiguousarray( - np.transpose(self.poses, [0,2,1]).flatten().astype(np.float32) - ) - else: - pose_data = np.ascontiguousarray( - np.eye(4).flatten().astype(np.float32) - ) - - modelbuffer = glGenBuffers(1) - self._buffers.append(modelbuffer) - glBindBuffer(GL_ARRAY_BUFFER, modelbuffer) - glBufferData( - GL_ARRAY_BUFFER, FLOAT_SZ * len(pose_data), - pose_data, GL_STATIC_DRAW - ) - - for i in range(0, 4): - idx = i + len(attr_sizes) - glEnableVertexAttribArray(idx) - glVertexAttribPointer( - idx, 4, GL_FLOAT, GL_FALSE, FLOAT_SZ * 4 * 4, - ctypes.c_void_p(4 * FLOAT_SZ * i) - ) - glVertexAttribDivisor(idx, 1) - - ####################################################################### - # Fill element buffer - ####################################################################### - if self.indices is not None: - elementbuffer = glGenBuffers(1) - self._buffers.append(elementbuffer) - glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer) - glBufferData(GL_ELEMENT_ARRAY_BUFFER, UINT_SZ * self.indices.size, - self.indices.flatten().astype(np.uint32), - GL_STATIC_DRAW) - - glBindVertexArray(0) - - def _remove_from_context(self): - if self._vaid is not None: - glDeleteVertexArrays(1, [self._vaid]) - glDeleteBuffers(len(self._buffers), self._buffers) - self._vaid = None - self._buffers = [] - - def _in_context(self): - return self._vaid is not None - - def _bind(self): - if self._vaid is None: - raise ValueError('Cannot bind a Mesh that has not been added ' - 'to a context') - glBindVertexArray(self._vaid) - - def _unbind(self): - 
glBindVertexArray(0) - - def _compute_bounds(self): - """Compute the bounds of this object. - """ - # Compute bounds of this object - bounds = np.array([np.min(self.positions, axis=0), - np.max(self.positions, axis=0)]) - - # If instanced, compute translations for approximate bounds - if self.poses is not None: - bounds += np.array([np.min(self.poses[:,:3,3], axis=0), - np.max(self.poses[:,:3,3], axis=0)]) - return bounds - - def _compute_transparency(self): - """Compute whether or not this object is transparent. - """ - if self.material.is_transparent: - return True - if self._is_transparent is None: - self._is_transparent = False - if self.color_0 is not None: - if np.any(self._color_0[:,3] != 1.0): - self._is_transparent = True - return self._is_transparent - - def _compute_buf_flags(self): - buf_flags = BufFlags.POSITION - - if self.normals is not None: - buf_flags |= BufFlags.NORMAL - if self.tangents is not None: - buf_flags |= BufFlags.TANGENT - if self.texcoord_0 is not None: - buf_flags |= BufFlags.TEXCOORD_0 - if self.texcoord_1 is not None: - buf_flags |= BufFlags.TEXCOORD_1 - if self.color_0 is not None: - buf_flags |= BufFlags.COLOR_0 - if self.joints_0 is not None: - buf_flags |= BufFlags.JOINTS_0 - if self.weights_0 is not None: - buf_flags |= BufFlags.WEIGHTS_0 - - return buf_flags diff --git a/spaces/bugbounted/Whisper-Auto-Subtitled-Video-Generator/utils.py b/spaces/bugbounted/Whisper-Auto-Subtitled-Video-Generator/utils.py deleted file mode 100644 index ae54176dab8e141ed806c9ac7cd088f2d274b26a..0000000000000000000000000000000000000000 --- a/spaces/bugbounted/Whisper-Auto-Subtitled-Video-Generator/utils.py +++ /dev/null @@ -1,96 +0,0 @@ -import textwrap -import zlib -from typing import Iterator, TextIO - - -def exact_div(x, y): - assert x % y == 0 - return x // y - - -def str2bool(string): - str2val = {"True": True, "False": False} - if string in str2val: - return str2val[string] - else: - raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}") - - -def optional_int(string): - return None if string == "None" else int(string) - - -def optional_float(string): - return None if string == "None" else float(string) - - -def compression_ratio(text) -> float: - return len(text) / len(zlib.compress(text.encode("utf-8"))) - - -def format_timestamp(seconds: float, always_include_hours: bool = False, fractionalSeperator: str = '.'): - assert seconds >= 0, "non-negative timestamp expected" - milliseconds = round(seconds * 1000.0) - - hours = milliseconds // 3_600_000 - milliseconds -= hours * 3_600_000 - - minutes = milliseconds // 60_000 - milliseconds -= minutes * 60_000 - - seconds = milliseconds // 1_000 - milliseconds -= seconds * 1_000 - - hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else "" - return f"{hours_marker}{minutes:02d}:{seconds:02d}{fractionalSeperator}{milliseconds:03d}" - - -def write_txt(transcript: Iterator[dict], file: TextIO): - for segment in transcript: - print(segment['text'].strip(), file=file, flush=True) - - -def write_vtt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None): - print("WEBVTT\n", file=file) - for segment in transcript: - text = processText(segment['text'], maxLineWidth).replace('-->', '->') - - print( - f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n" - f"{text}\n", - file=file, - flush=True, - ) - - -def write_srt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None): - """ - Write a transcript to a file in SRT format. 
- Example usage: - from pathlib import Path - from whisper.utils import write_srt - result = transcribe(model, audio_path, temperature=temperature, **args) - # save SRT - audio_basename = Path(audio_path).stem - with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt: - write_srt(result["segments"], file=srt) - """ - for i, segment in enumerate(transcript, start=1): - text = processText(segment['text'].strip(), maxLineWidth).replace('-->', '->') - - # write srt lines - print( - f"{i}\n" - f"{format_timestamp(segment['start'], always_include_hours=True, fractionalSeperator=',')} --> " - f"{format_timestamp(segment['end'], always_include_hours=True, fractionalSeperator=',')}\n" - f"{text}\n", - file=file, - flush=True, - ) - -def processText(text: str, maxLineWidth=None): - if (maxLineWidth is None or maxLineWidth < 0): - return text - - lines = textwrap.wrap(text, width=maxLineWidth, tabsize=4) - return '\n'.join(lines) diff --git a/spaces/cadige/04-Gradio-SOTA/qasrl_model_pipeline.py b/spaces/cadige/04-Gradio-SOTA/qasrl_model_pipeline.py deleted file mode 100644 index abcb4e1e2ba93ae92aae2dc8dd353ed549d813dc..0000000000000000000000000000000000000000 --- a/spaces/cadige/04-Gradio-SOTA/qasrl_model_pipeline.py +++ /dev/null @@ -1,182 +0,0 @@ -from typing import Optional -import json -from argparse import Namespace -from pathlib import Path -from transformers import Text2TextGenerationPipeline, AutoModelForSeq2SeqLM, AutoTokenizer - -def get_markers_for_model(is_t5_model: bool) -> Namespace: - special_tokens_constants = Namespace() - if is_t5_model: - # T5 models have 100 special tokens by default - special_tokens_constants.separator_input_question_predicate = "" - special_tokens_constants.separator_output_answers = "" - special_tokens_constants.separator_output_questions = "" # if using only questions - special_tokens_constants.separator_output_question_answer = "" - special_tokens_constants.separator_output_pairs = "" - special_tokens_constants.predicate_generic_marker = "" - special_tokens_constants.predicate_verb_marker = "" - special_tokens_constants.predicate_nominalization_marker = "" - - else: - special_tokens_constants.separator_input_question_predicate = "" - special_tokens_constants.separator_output_answers = "" - special_tokens_constants.separator_output_questions = "" # if using only questions - special_tokens_constants.separator_output_question_answer = "" - special_tokens_constants.separator_output_pairs = "" - special_tokens_constants.predicate_generic_marker = "" - special_tokens_constants.predicate_verb_marker = "" - special_tokens_constants.predicate_nominalization_marker = "" - return special_tokens_constants - -def load_trained_model(name_or_path): - import huggingface_hub as HFhub - tokenizer = AutoTokenizer.from_pretrained(name_or_path) - model = AutoModelForSeq2SeqLM.from_pretrained(name_or_path) - # load preprocessing_kwargs from the model repo on HF hub, or from the local model directory - kwargs_filename = None - if name_or_path.startswith("kleinay/"): # and 'preprocessing_kwargs.json' in HFhub.list_repo_files(name_or_path): # the supported version of HFhub doesn't support list_repo_files - kwargs_filename = HFhub.hf_hub_download(repo_id=name_or_path, filename="preprocessing_kwargs.json") - elif Path(name_or_path).is_dir() and (Path(name_or_path) / "experiment_kwargs.json").exists(): - kwargs_filename = Path(name_or_path) / "experiment_kwargs.json" - - if kwargs_filename: - preprocessing_kwargs = json.load(open(kwargs_filename)) - #
integrate into model.config (for decoding args, e.g. "num_beams"), and save also as standalone object for preprocessing - model.config.preprocessing_kwargs = Namespace(**preprocessing_kwargs) - model.config.update(preprocessing_kwargs) - return model, tokenizer - - -class QASRL_Pipeline(Text2TextGenerationPipeline): - def __init__(self, model_repo: str, **kwargs): - model, tokenizer = load_trained_model(model_repo) - super().__init__(model, tokenizer, framework="pt") - self.is_t5_model = "t5" in model.config.model_type - self.special_tokens = get_markers_for_model(self.is_t5_model) - self.data_args = model.config.preprocessing_kwargs - # backward compatibility - default keyword values implemented in `run_summarization`, thus not saved in `preprocessing_kwargs` - if "predicate_marker_type" not in vars(self.data_args): - self.data_args.predicate_marker_type = "generic" - if "use_bilateral_predicate_marker" not in vars(self.data_args): - self.data_args.use_bilateral_predicate_marker = True - if "append_verb_form" not in vars(self.data_args): - self.data_args.append_verb_form = True - self._update_config(**kwargs) - - def _update_config(self, **kwargs): - " Update self.model.config with initialization parameters and necessary defaults. " - # set default values that will always override model.config, but can be overridden by __init__ kwargs - kwargs["max_length"] = kwargs.get("max_length", 80) - # override model.config with kwargs - for k,v in kwargs.items(): - self.model.config.__dict__[k] = v - - def _sanitize_parameters(self, **kwargs): - preprocess_kwargs, forward_kwargs, postprocess_kwargs = {}, {}, {} - if "predicate_marker" in kwargs: - preprocess_kwargs["predicate_marker"] = kwargs["predicate_marker"] - if "predicate_type" in kwargs: - preprocess_kwargs["predicate_type"] = kwargs["predicate_type"] - if "verb_form" in kwargs: - preprocess_kwargs["verb_form"] = kwargs["verb_form"] - return preprocess_kwargs, forward_kwargs, postprocess_kwargs - - def preprocess(self, inputs, predicate_marker="", predicate_type=None, verb_form=None): - # Here, inputs is a string or a list of strings; apply string preprocessing - if isinstance(inputs, str): - processed_inputs = self._preprocess_string(inputs, predicate_marker, predicate_type, verb_form) - elif hasattr(inputs, "__iter__"): - processed_inputs = [self._preprocess_string(s, predicate_marker, predicate_type, verb_form) for s in inputs] - else: - raise ValueError("inputs must be str or Iterable[str]") - # Now pass to super.preprocess for tokenization - return super().preprocess(processed_inputs) - - def _preprocess_string(self, seq: str, predicate_marker: str, predicate_type: Optional[str], verb_form: Optional[str]) -> str: - sent_tokens = seq.split(" ") - assert predicate_marker in sent_tokens, f"Input sentence must include a predicate-marker token ('{predicate_marker}') before the target predicate word" - predicate_idx = sent_tokens.index(predicate_marker) - sent_tokens.remove(predicate_marker) - sentence_before_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx)]) - predicate = sent_tokens[predicate_idx] - sentence_after_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx+1, len(sent_tokens))]) - - if self.data_args.predicate_marker_type == "generic": - predicate_marker = self.special_tokens.predicate_generic_marker - # In case we want a special marker for each predicate type: - elif self.data_args.predicate_marker_type == "pred_type": - assert predicate_type is not None, "For this model, you must provide the
`predicate_type` either when initializing QASRL_Pipeline(...) or when applying __call__(...) on it" - assert predicate_type in ("verbal", "nominal"), f"`predicate_type` must be either 'verbal' or 'nominal'; got '{predicate_type}'" - predicate_marker = {"verbal": self.special_tokens.predicate_verb_marker , - "nominal": self.special_tokens.predicate_nominalization_marker - }[predicate_type] - - if self.data_args.use_bilateral_predicate_marker: - seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {predicate_marker} {sentence_after_predicate}" - else: - seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {sentence_after_predicate}" - - # embed also verb_form - if self.data_args.append_verb_form and verb_form is None: - raise ValueError("For this model, you must provide the `verb_form` of the predicate when applying __call__(...)") - elif self.data_args.append_verb_form: - seq = f"{seq} {self.special_tokens.separator_input_question_predicate} {verb_form} " - else: - seq = f"{seq} " - - # append source prefix (for t5 models) - prefix = self._get_source_prefix(predicate_type) - - return prefix + seq - - def _get_source_prefix(self, predicate_type: Optional[str]): - if not self.is_t5_model or self.data_args.source_prefix is None: - return '' - if not self.data_args.source_prefix.startswith("<"): # Regular prefix - not dependent on input row x - return self.data_args.source_prefix - if self.data_args.source_prefix == "": - if predicate_type is None: - raise ValueError("source_prefix is '' but no `predicate_type` was provided.") - else: - return f"Generate QAs for {predicate_type} QASRL: " - - def _forward(self, *args, **kwargs): - outputs = super()._forward(*args, **kwargs) - return outputs - - - def postprocess(self, model_outputs): - output_seq = self.tokenizer.decode( - model_outputs["output_ids"].squeeze(), - skip_special_tokens=False, - clean_up_tokenization_spaces=False, - ) - output_seq = output_seq.strip(self.tokenizer.pad_token).strip(self.tokenizer.eos_token).strip() - qa_subseqs = output_seq.split(self.special_tokens.separator_output_pairs) - qas = [self._postprocess_qa(qa_subseq) for qa_subseq in qa_subseqs] - return {"generated_text": output_seq, - "QAs": qas} - - def _postprocess_qa(self, seq: str) -> Optional[dict]: - # split question and answers - if self.special_tokens.separator_output_question_answer in seq: - question, answer = seq.split(self.special_tokens.separator_output_question_answer)[:2] - else: - print("invalid format: no separator between question and answer found...") - return None - # question, answer = seq, '' # Or: backoff to only question - # skip "_" slots in questions - question = ' '.join(t for t in question.split(' ') if t != '_') - answers = [a.strip() for a in answer.split(self.special_tokens.separator_output_answers)] - return {"question": question, "answers": answers} - - -if __name__ == "__main__": - pipe = QASRL_Pipeline("kleinay/qanom-seq2seq-model-baseline") - res1 = pipe("The student was interested in Luke 's research about sea animals .", verb_form="research", predicate_type="nominal") - res2 = pipe(["The doctor was interested in Luke 's treatment .", - "The Veterinary student was interested in Luke 's treatment of sea animals ."], verb_form="treat", predicate_type="nominal", num_beams=10) - res3 = pipe("A number of professions have developed that specialize in the treatment of mental disorders .", verb_form="develop", predicate_type="verbal") - print(res1) - print(res2) - print(res3) \ No newline at end of file diff --git
a/spaces/carlosalonso/Detection-video/carpeta_deteccion/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py deleted file mode 100644 index df7a2aedf480ed8dc4aa3645e37420e9b893fae4..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py +++ /dev/null @@ -1,72 +0,0 @@ -import detectron2.data.transforms as T -from detectron2.config.lazy import LazyCall as L -from detectron2.layers.batch_norm import NaiveSyncBatchNorm -from detectron2.solver import WarmupParamScheduler -from fvcore.common.param_scheduler import MultiStepParamScheduler - -from ..common.data.coco import dataloader -from ..common.models.mask_rcnn_fpn import model -from ..common.optim import SGD as optimizer -from ..common.train import train - -# train from scratch -train.init_checkpoint = "" -train.amp.enabled = True -train.ddp.fp16_compression = True -model.backbone.bottom_up.freeze_at = 0 - -# SyncBN -# fmt: off -model.backbone.bottom_up.stem.norm = \ - model.backbone.bottom_up.stages.norm = \ - model.backbone.norm = "SyncBN" - -# Using NaiveSyncBatchNorm because heads may have empty input. That is not supported by -# torch.nn.SyncBatchNorm. We can remove this after -# https://github.com/pytorch/pytorch/issues/36530 is fixed. -model.roi_heads.box_head.conv_norm = \ - model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c, - stats_mode="N") -# fmt: on - -# 2conv in RPN: -# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950 -model.proposal_generator.head.conv_dims = [-1, -1] - -# 4conv1fc box head -model.roi_heads.box_head.conv_dims = [256, 256, 256, 256] -model.roi_heads.box_head.fc_dims = [1024] - -# resize_and_crop_image in: -# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950 -image_size = 1024 -dataloader.train.mapper.augmentations = [ - L(T.ResizeScale)( - min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size - ), - L(T.FixedSizeCrop)(crop_size=(image_size, image_size)), - L(T.RandomFlip)(horizontal=True), -] - -# recompute boxes due to cropping -dataloader.train.mapper.recompute_boxes = True - -# larger batch-size. -dataloader.train.total_batch_size = 64 - -# Equivalent to 100 epochs.
-# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep -train.max_iter = 184375 - -lr_multiplier = L(WarmupParamScheduler)( - scheduler=L(MultiStepParamScheduler)( - values=[1.0, 0.1, 0.01], - milestones=[163889, 177546], - num_updates=train.max_iter, - ), - warmup_length=500 / train.max_iter, - warmup_factor=0.067, -) - -optimizer.lr = 0.1 -optimizer.weight_decay = 4e-5 diff --git a/spaces/cedssama/I3D_Sign_Language_Classification/app.py b/spaces/cedssama/I3D_Sign_Language_Classification/app.py deleted file mode 100644 index 0ac2284825f4a7e7bc89397cfc8192952dd9507a..0000000000000000000000000000000000000000 --- a/spaces/cedssama/I3D_Sign_Language_Classification/app.py +++ /dev/null @@ -1,121 +0,0 @@ -import torch -import cv2 -import videotransforms -import numpy as np -import gradio as gr -from einops import rearrange -from torchvision import transforms -from pytorch_i3d import InceptionI3d - - -def preprocess(vidpath): - # Fetch video - cap = cv2.VideoCapture(vidpath) - - frames = [] - cap.set(cv2.CAP_PROP_POS_FRAMES, 0) - num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - - # Extract frames from video - for _ in range(num): - _, img = cap.read() - - # Skip NoneType frames - if img is None: - continue - - # Resize if (w,h) < (226,226) - w, h, c = img.shape - if w < 226 or h < 226: - d = 226. - min(w, h) - sc = 1 + d / min(w, h) - img = cv2.resize(img, dsize=(0, 0), fx=sc, fy=sc) - - # Normalize - img = (img / 255.) * 2 - 1 - - frames.append(img) - - frames = torch.Tensor(np.asarray(frames, dtype=np.float32)) - - # Transform tensor and reshape to (1, c, t, w, h) - transform = transforms.Compose([videotransforms.CenterCrop(224)]) - frames = transform(frames) - frames = rearrange(frames, 't w h c-> 1 c t w h') - - return frames - -def classify(video,dataset='WLASL100'): - to_load = { - 'WLASL100':{'logits':100,'path':'weights/asl100/FINAL_nslt_100_iters=896_top1=65.89_top5=84.11_top10=89.92.pt'}, - 'WLASL2000':{'logits':2000,'path':'weights/asl2000/FINAL_nslt_2000_iters=5104_top1=32.48_top5=57.31_top10=66.31.pt'} - } - - # Preprocess video - input = preprocess(video) - - # Load model - model = InceptionI3d() - model.load_state_dict(torch.load('weights/rgb_imagenet.pt',map_location=torch.device('cpu'))) - model.replace_logits(to_load[dataset]['logits']) - model.load_state_dict(torch.load(to_load[dataset]['path'],map_location=torch.device('cpu'))) - - # Run on cpu. Spaces environment is limited to CPU for free users. - model.cpu() - - # Evaluation mode - model.eval() - - with torch.no_grad(): # Disable gradient computation - per_frame_logits = model(input) # Inference - - per_frame_logits.cpu() - model.cpu() - - # Load predictions - predictions = rearrange(per_frame_logits,'1 j k -> j k') - predictions = torch.mean(predictions, dim = 1) - - # Fetch top 10 predictions - _, index = torch.topk(predictions,10) - index = index.cpu().numpy() - - # Load labels - with open('wlasl_class_list.txt') as f: - idx2label = dict() - for line in f: - idx2label[int(line.split()[0])]=line.split()[1] - - # Get probabilities - predictions = torch.nn.functional.softmax(predictions, dim=0).cpu().numpy() - - # Return dict {label:pred} - return {idx2label[i]:float(predictions[i]) for i in index} - -# Gradio App config -title = "I3D Sign Language Recognition" -description = "Gradio demo of word-level sign language classification using an I3D model pretrained on the WLASL video dataset. " \ - "WLASL is a large-scale dataset containing more than 2000 words in American Sign Language. " \ - "Examples used in the demo are videos from the test subset. " \ - "Note that WLASL100 contains 100 words while WLASL2000 contains 2000." -examples = [ - ['videos/no.mp4','WLASL100'], - ['videos/all.mp4','WLASL100'], - ['videos/before.mp4','WLASL100'], - ['videos/blue.mp4','WLASL2000'], - ['videos/white.mp4','WLASL2000'], - ['videos/accident2.mp4','WLASL2000'] - ] - -article = "NOTE: This is not the official demonstration of the I3D sign language classification on the WLASL dataset. "\ - "More information about the WLASL dataset and pretrained I3D models can be found here." - -# Gradio App interface -gr.Interface( fn=classify, - inputs=[gr.inputs.Video(label="Video (*.mp4)"),gr.inputs.Radio(choices=['WLASL100','WLASL2000'], default='WLASL100', label='Trained on:')], - outputs=[gr.outputs.Label(num_top_classes=5, label='Top 5 Predictions')], - allow_flagging="never", - title=title, - description=description, - examples=examples, - article=article).launch() diff --git a/spaces/cfwef/gpt/crazy_functions/test_project/latex/attention/parameter_attention.tex b/spaces/cfwef/gpt/crazy_functions/test_project/latex/attention/parameter_attention.tex deleted file mode 100644 index 7bc4fe452dbdbfe44ff72f0cdbd37acd5c786ce6..0000000000000000000000000000000000000000 --- a/spaces/cfwef/gpt/crazy_functions/test_project/latex/attention/parameter_attention.tex +++ /dev/null @@ -1,45 +0,0 @@ -\pagebreak -\section*{Two Feed-Forward Layers = Attention over Parameters}\label{sec:parameter_attention} - -In addition to attention layers, our model contains position-wise feed-forward networks (Section \ref{sec:ffn}), which consist of two linear transformations with a ReLU activation in between. In fact, these networks too can be seen as a form of attention. Compare the formula for such a network with the formula for a simple dot-product attention layer (biases and scaling factors omitted): - -\begin{align*} - FFN(x, W_1, W_2) = ReLU(xW_1)W_2 \\ - A(q, K, V) = Softmax(qK^T)V -\end{align*} - -Based on the similarity of these formulae, the two-layer feed-forward network can be seen as a kind of attention, where the keys and values are the rows of the trainable parameter matrices $W_1$ and $W_2$, and where we use ReLU instead of Softmax in the compatibility function. - -%the compatibility function is $compat(q, k_i) = ReLU(q \cdot k_i)$ instead of $Softmax(qK^T)_i$. - -Given this similarity, we experimented with replacing the position-wise feed-forward networks with attention layers similar to the ones we use everywhere else in our model. The multi-head-attention-over-parameters sublayer is identical to the multi-head attention described in \ref{sec:multihead}, except that the "keys" and "values" inputs to each attention head are trainable model parameters, as opposed to being linear projections of a previous layer. These parameters are scaled up by a factor of $\sqrt{d_{model}}$ in order to be more similar to activations. - -In our first experiment, we replaced each position-wise feed-forward network with a multi-head-attention-over-parameters sublayer with $h_p=8$ heads, key-dimensionality $d_{pk}=64$, and value-dimensionality $d_{pv}=64$, using $n_p=1536$ key-value pairs for each attention head. The sublayer has a total of $2097152$ parameters, including the parameters in the query projection and the output projection. This matches the number of parameters in the position-wise feed-forward network that we replaced.
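To make the sublayer concrete, a minimal PyTorch sketch of attention over parameters follows; it is an illustration under the hyperparameters named above ($h_p$, $d_{pk}$, $d_{pv}$, $n_p$), not the authors' implementation, and applying the $\sqrt{d_{model}}$ scale at parameter initialization is an assumption.

import torch
import torch.nn as nn
import torch.nn.functional as F

class AttentionOverParameters(nn.Module):
    # "Keys" and "values" are trainable parameters rather than projections of
    # a previous layer; queries are projected from the input as usual.
    def __init__(self, d_model=512, h_p=8, d_pk=64, d_pv=64, n_p=1536):
        super().__init__()
        self.h_p, self.d_pk = h_p, d_pk
        self.q_proj = nn.Linear(d_model, h_p * d_pk)
        self.out_proj = nn.Linear(h_p * d_pv, d_model)
        # Scaled up by sqrt(d_model) to be more similar to activations
        # (whether the scale enters at initialization is an assumption here).
        self.keys = nn.Parameter(torch.randn(h_p, n_p, d_pk) * d_model ** 0.5)
        self.values = nn.Parameter(torch.randn(h_p, n_p, d_pv) * d_model ** 0.5)

    def forward(self, x):  # x: (batch, seq, d_model)
        b, t, _ = x.shape
        # (batch, heads, seq, d_pk)
        q = self.q_proj(x).view(b, t, self.h_p, self.d_pk).transpose(1, 2)
        # attend over the n_p parameter "key-value pairs" of each head
        scores = torch.einsum("bhtk,hnk->bhtn", q, self.keys) / self.d_pk ** 0.5
        out = torch.einsum("bhtn,hnv->bhtv", F.softmax(scores, dim=-1), self.values)
        return self.out_proj(out.transpose(1, 2).reshape(b, t, -1))

Ignoring biases, the defaults give 512*512 + 512*512 + 8*1536*64 + 8*1536*64 = 2097152 parameters, matching the count quoted above; replacing the softmax with a ReLU and collapsing the query and output projections into identities recovers the $FFN(x, W_1, W_2) = ReLU(xW_1)W_2$ form.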
While the theoretical amount of computation is also the same, in practice, the attention version caused the step times to be about 30\% longer. - -In our second experiment, we used $h_p=8$ heads, and $n_p=512$ key-value pairs for each attention head, again matching the total number of parameters in the base model. - -Results for the first experiment were slightly worse than for the base model, and results for the second experiment were slightly better, see Table~\ref{tab:parameter_attention}. - -\begin{table}[h] -\caption{Replacing the position-wise feed-forward networks with multihead-attention-over-parameters produces similar results to the base model. All metrics are on the English-to-German translation development set, newstest2013.} -\label{tab:parameter_attention} -\begin{center} -\vspace{-2mm} -%\scalebox{1.0}{ -\begin{tabular}{c|cccccc|cccc} -\hline\rule{0pt}{2.0ex} - & \multirow{2}{*}{$\dmodel$} & \multirow{2}{*}{$\dff$} & -\multirow{2}{*}{$h_p$} & \multirow{2}{*}{$d_{pk}$} & \multirow{2}{*}{$d_{pv}$} & - \multirow{2}{*}{$n_p$} & - PPL & BLEU & params & training\\ - & & & & & & & (dev) & (dev) & $\times10^6$ & time \\ -\hline\rule{0pt}{2.0ex} -base & 512 & 2048 & & & & & 4.92 & 25.8 & 65 & 12 hours\\ -\hline\rule{0pt}{2.0ex} -AOP$_1$ & 512 & & 8 & 64 & 64 & 1536 & 4.92& 25.5 & 65 & 16 hours\\ -AOP$_2$ & 512 & & 16 & 64 & 64 & 512 & \textbf{4.86} & \textbf{25.9} & 65 & 16 hours \\ -\hline -\end{tabular} -%} -\end{center} -\end{table} diff --git a/spaces/chendl/compositional_test/multimodal/offline_labeling.py b/spaces/chendl/compositional_test/multimodal/offline_labeling.py deleted file mode 100644 index e9273a1c1a44f21f3e7197d7b60251cc7e71a81d..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/offline_labeling.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys -GPU_PER_NODE = 6 -TASK_PER_GPU = 8 - -if __name__ == "__main__": - split = sys.argv[1] - start_idx = sys.argv[2] - end_idx = sys.argv[3] - job_id = os.environ["SLURM_JOBID"] - gpu_id = 0 - job_bash = f"temp/job/{job_id}.sh" - with open(job_bash, "w") as f: - f.write("export TRANSFORMERS_OFFLINE=1\n") - for i, idx in enumerate(range(int(start_idx), int(end_idx))): - zfill_idx = str(idx).zfill(6) - f.write(f"CUDA_VISIBLE_DEVICES={gpu_id} python3 offline_grounding_dino.py {split} {zfill_idx} &> temp/log/{split}_{zfill_idx}_{job_id}_{gpu_id}.txt &\n") - gpu_id = (gpu_id + 1) % GPU_PER_NODE - f.write("sleep 7200\n") - print("run!") - os.system(f"bash {job_bash}") - print("end!") diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/data/__init__.py b/spaces/chendl/compositional_test/transformers/src/transformers/data/__init__.py deleted file mode 100644 index 1a8ef35ff439e48caf92dba731f7c551f6dcf285..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/src/transformers/data/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
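The imports that follow re-export the library's public data utilities. As a quick orientation, here is a hedged sketch of how one of those exports, DataCollatorWithPadding, is typically used; the checkpoint name is a placeholder, not something prescribed by this module.

from transformers import AutoTokenizer
from transformers.data import DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # placeholder checkpoint
collator = DataCollatorWithPadding(tokenizer)

# Features may have different sequence lengths; the collator pads them
# into one rectangular batch of PyTorch tensors.
features = [tokenizer("a short sentence"), tokenizer("a somewhat longer example sentence")]
batch = collator(features)
print(batch["input_ids"].shape)  # (2, length of the longest sequence)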
- -from .data_collator import ( - DataCollatorForLanguageModeling, - DataCollatorForPermutationLanguageModeling, - DataCollatorForSeq2Seq, - DataCollatorForSOP, - DataCollatorForTokenClassification, - DataCollatorForWholeWordMask, - DataCollatorWithPadding, - DefaultDataCollator, - default_data_collator, -) -from .metrics import glue_compute_metrics, xnli_compute_metrics -from .processors import ( - DataProcessor, - InputExample, - InputFeatures, - SingleSentenceClassificationProcessor, - SquadExample, - SquadFeatures, - SquadV1Processor, - SquadV2Processor, - glue_convert_examples_to_features, - glue_output_modes, - glue_processors, - glue_tasks_num_labels, - squad_convert_examples_to_features, - xnli_output_modes, - xnli_processors, - xnli_tasks_num_labels, -) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ContainerIO.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ContainerIO.py deleted file mode 100644 index 45e80b39af72c15aa58c08618daa7289d96649d0..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ContainerIO.py +++ /dev/null @@ -1,120 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# a class to read from a container file -# -# History: -# 1995-06-18 fl Created -# 1995-09-07 fl Added readline(), readlines() -# -# Copyright (c) 1997-2001 by Secret Labs AB -# Copyright (c) 1995 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - - -import io - - -class ContainerIO: - """ - A file object that provides read access to a part of an existing - file (for example a TAR file). - """ - - def __init__(self, file, offset, length): - """ - Create file object. - - :param file: Existing file. - :param offset: Start of region, in bytes. - :param length: Size of region, in bytes. - """ - self.fh = file - self.pos = 0 - self.offset = offset - self.length = length - self.fh.seek(offset) - - ## - # Always false. - - def isatty(self): - return False - - def seek(self, offset, mode=io.SEEK_SET): - """ - Move file pointer. - - :param offset: Offset in bytes. - :param mode: Starting position. Use 0 for beginning of region, 1 - for current offset, and 2 for end of region. You cannot move - the pointer outside the defined region. - """ - if mode == 1: - self.pos = self.pos + offset - elif mode == 2: - self.pos = self.length + offset - else: - self.pos = offset - # clamp - self.pos = max(0, min(self.pos, self.length)) - self.fh.seek(self.offset + self.pos) - - def tell(self): - """ - Get current file pointer. - - :returns: Offset from start of region, in bytes. - """ - return self.pos - - def read(self, n=0): - """ - Read data. - - :param n: Number of bytes to read. If omitted or zero, - read until end of region. - :returns: An 8-bit string. - """ - if n: - n = min(n, self.length - self.pos) - else: - n = self.length - self.pos - if not n: # EOF - return b"" if "b" in self.fh.mode else "" - self.pos = self.pos + n - return self.fh.read(n) - - def readline(self): - """ - Read a line of text. - - :returns: An 8-bit string. - """ - s = b"" if "b" in self.fh.mode else "" - newline_character = b"\n" if "b" in self.fh.mode else "\n" - while True: - c = self.read(1) - if not c: - break - s = s + c - if c == newline_character: - break - return s - - def readlines(self): - """ - Read multiple lines of text. - - :returns: A list of 8-bit strings. 
- """ - lines = [] - while True: - s = self.readline() - if not s: - break - lines.append(s) - return lines diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/PngImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/PngImagePlugin.py deleted file mode 100644 index bfa8cb7ac66c15e2f5d1128f4ba9a1ad69758ec1..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/PngImagePlugin.py +++ /dev/null @@ -1,1456 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# PNG support code -# -# See "PNG (Portable Network Graphics) Specification, version 1.0; -# W3C Recommendation", 1996-10-01, Thomas Boutell (ed.). -# -# history: -# 1996-05-06 fl Created (couldn't resist it) -# 1996-12-14 fl Upgraded, added read and verify support (0.2) -# 1996-12-15 fl Separate PNG stream parser -# 1996-12-29 fl Added write support, added getchunks -# 1996-12-30 fl Eliminated circular references in decoder (0.3) -# 1998-07-12 fl Read/write 16-bit images as mode I (0.4) -# 2001-02-08 fl Added transparency support (from Zircon) (0.5) -# 2001-04-16 fl Don't close data source in "open" method (0.6) -# 2004-02-24 fl Don't even pretend to support interlaced files (0.7) -# 2004-08-31 fl Do basic sanity check on chunk identifiers (0.8) -# 2004-09-20 fl Added PngInfo chunk container -# 2004-12-18 fl Added DPI read support (based on code by Niki Spahiev) -# 2008-08-13 fl Added tRNS support for RGB images -# 2009-03-06 fl Support for preserving ICC profiles (by Florian Hoech) -# 2009-03-08 fl Added zTXT support (from Lowell Alleman) -# 2009-03-29 fl Read interlaced PNG files (from Conrado Porto Lopes Gouvêa) -# -# Copyright (c) 1997-2009 by Secret Labs AB -# Copyright (c) 1996 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import itertools -import logging -import re -import struct -import warnings -import zlib -from enum import IntEnum - -from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence -from ._binary import i16be as i16 -from ._binary import i32be as i32 -from ._binary import o8 -from ._binary import o16be as o16 -from ._binary import o32be as o32 - -logger = logging.getLogger(__name__) - -is_cid = re.compile(rb"\w\w\w\w").match - - -_MAGIC = b"\211PNG\r\n\032\n" - - -_MODES = { - # supported bits/color combinations, and corresponding modes/rawmodes - # Greyscale - (1, 0): ("1", "1"), - (2, 0): ("L", "L;2"), - (4, 0): ("L", "L;4"), - (8, 0): ("L", "L"), - (16, 0): ("I", "I;16B"), - # Truecolour - (8, 2): ("RGB", "RGB"), - (16, 2): ("RGB", "RGB;16B"), - # Indexed-colour - (1, 3): ("P", "P;1"), - (2, 3): ("P", "P;2"), - (4, 3): ("P", "P;4"), - (8, 3): ("P", "P"), - # Greyscale with alpha - (8, 4): ("LA", "LA"), - (16, 4): ("RGBA", "LA;16B"), # LA;16B->LA not yet available - # Truecolour with alpha - (8, 6): ("RGBA", "RGBA"), - (16, 6): ("RGBA", "RGBA;16B"), -} - - -_simple_palette = re.compile(b"^\xff*\x00\xff*$") - -MAX_TEXT_CHUNK = ImageFile.SAFEBLOCK -""" -Maximum decompressed size for an iTXt or zTXt chunk. -Eliminates decompression bombs where compressed chunks can expand 1000x. -See :ref:`Text in PNG File Format`. -""" -MAX_TEXT_MEMORY = 64 * MAX_TEXT_CHUNK -""" -Set the maximum total text chunk size. -See :ref:`Text in PNG File Format`. -""" - - -# APNG frame disposal modes -class Disposal(IntEnum): - OP_NONE = 0 - """ - No disposal is done on this frame before rendering the next frame.
- See :ref:`Saving APNG sequences`. - """ - OP_BACKGROUND = 1 - """ - This frame’s modified region is cleared to fully transparent black before rendering - the next frame. - See :ref:`Saving APNG sequences`. - """ - OP_PREVIOUS = 2 - """ - This frame’s modified region is reverted to the previous frame’s contents before - rendering the next frame. - See :ref:`Saving APNG sequences`. - """ - - -# APNG frame blend modes -class Blend(IntEnum): - OP_SOURCE = 0 - """ - All color components of this frame, including alpha, overwrite the previous output - image contents. - See :ref:`Saving APNG sequences`. - """ - OP_OVER = 1 - """ - This frame should be alpha composited with the previous output image contents. - See :ref:`Saving APNG sequences`. - """ - - -def _safe_zlib_decompress(s): - dobj = zlib.decompressobj() - plaintext = dobj.decompress(s, MAX_TEXT_CHUNK) - if dobj.unconsumed_tail: - msg = "Decompressed Data Too Large" - raise ValueError(msg) - return plaintext - - -def _crc32(data, seed=0): - return zlib.crc32(data, seed) & 0xFFFFFFFF - - -# -------------------------------------------------------------------- -# Support classes. Suitable for PNG and related formats like MNG etc. - - -class ChunkStream: - def __init__(self, fp): - self.fp = fp - self.queue = [] - - def read(self): - """Fetch a new chunk. Returns header information.""" - cid = None - - if self.queue: - cid, pos, length = self.queue.pop() - self.fp.seek(pos) - else: - s = self.fp.read(8) - cid = s[4:] - pos = self.fp.tell() - length = i32(s) - - if not is_cid(cid): - if not ImageFile.LOAD_TRUNCATED_IMAGES: - msg = f"broken PNG file (chunk {repr(cid)})" - raise SyntaxError(msg) - - return cid, pos, length - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - def close(self): - self.queue = self.fp = None - - def push(self, cid, pos, length): - self.queue.append((cid, pos, length)) - - def call(self, cid, pos, length): - """Call the appropriate chunk handler""" - - logger.debug("STREAM %r %s %s", cid, pos, length) - return getattr(self, "chunk_" + cid.decode("ascii"))(pos, length) - - def crc(self, cid, data): - """Read and verify checksum""" - - # Skip CRC checks for ancillary chunks if allowed to load truncated - # images - # 5th byte of first char is 1 [specs, section 5.4] - if ImageFile.LOAD_TRUNCATED_IMAGES and (cid[0] >> 5 & 1): - self.crc_skip(cid, data) - return - - try: - crc1 = _crc32(data, _crc32(cid)) - crc2 = i32(self.fp.read(4)) - if crc1 != crc2: - msg = f"broken PNG file (bad header checksum in {repr(cid)})" - raise SyntaxError(msg) - except struct.error as e: - msg = f"broken PNG file (incomplete checksum in {repr(cid)})" - raise SyntaxError(msg) from e - - def crc_skip(self, cid, data): - """Read checksum""" - - self.fp.read(4) - - def verify(self, endchunk=b"IEND"): - # Simple approach; just calculate checksum for all remaining - # blocks. Must be called directly after open. 
- - cids = [] - - while True: - try: - cid, pos, length = self.read() - except struct.error as e: - msg = "truncated PNG file" - raise OSError(msg) from e - - if cid == endchunk: - break - self.crc(cid, ImageFile._safe_read(self.fp, length)) - cids.append(cid) - - return cids - - -class iTXt(str): - """ - Subclass of string to allow iTXt chunks to look like strings while - keeping their extra information - - """ - - @staticmethod - def __new__(cls, text, lang=None, tkey=None): - """ - :param cls: the class to use when creating the instance - :param text: value for this key - :param lang: language code - :param tkey: UTF-8 version of the key name - """ - - self = str.__new__(cls, text) - self.lang = lang - self.tkey = tkey - return self - - -class PngInfo: - """ - PNG chunk container (for use with save(pnginfo=)) - - """ - - def __init__(self): - self.chunks = [] - - def add(self, cid, data, after_idat=False): - """Appends an arbitrary chunk. Use with caution. - - :param cid: a byte string, 4 bytes long. - :param data: a byte string of the encoded data - :param after_idat: for use with private chunks. Whether the chunk - should be written after IDAT - - """ - - chunk = [cid, data] - if after_idat: - chunk.append(True) - self.chunks.append(tuple(chunk)) - - def add_itxt(self, key, value, lang="", tkey="", zip=False): - """Appends an iTXt chunk. - - :param key: latin-1 encodable text key name - :param value: value for this key - :param lang: language code - :param tkey: UTF-8 version of the key name - :param zip: compression flag - - """ - - if not isinstance(key, bytes): - key = key.encode("latin-1", "strict") - if not isinstance(value, bytes): - value = value.encode("utf-8", "strict") - if not isinstance(lang, bytes): - lang = lang.encode("utf-8", "strict") - if not isinstance(tkey, bytes): - tkey = tkey.encode("utf-8", "strict") - - if zip: - self.add( - b"iTXt", - key + b"\0\x01\0" + lang + b"\0" + tkey + b"\0" + zlib.compress(value), - ) - else: - self.add(b"iTXt", key + b"\0\0\0" + lang + b"\0" + tkey + b"\0" + value) - - def add_text(self, key, value, zip=False): - """Appends a text chunk. 
- - :param key: latin-1 encodable text key name - :param value: value for this key, text or an - :py:class:`PIL.PngImagePlugin.iTXt` instance - :param zip: compression flag - - """ - if isinstance(value, iTXt): - return self.add_itxt(key, value, value.lang, value.tkey, zip=zip) - - # The tEXt chunk stores latin-1 text - if not isinstance(value, bytes): - try: - value = value.encode("latin-1", "strict") - except UnicodeError: - return self.add_itxt(key, value, zip=zip) - - if not isinstance(key, bytes): - key = key.encode("latin-1", "strict") - - if zip: - self.add(b"zTXt", key + b"\0\0" + zlib.compress(value)) - else: - self.add(b"tEXt", key + b"\0" + value) - - -# -------------------------------------------------------------------- -# PNG image stream (IHDR/IEND) - - -class PngStream(ChunkStream): - def __init__(self, fp): - super().__init__(fp) - - # local copies of Image attributes - self.im_info = {} - self.im_text = {} - self.im_size = (0, 0) - self.im_mode = None - self.im_tile = None - self.im_palette = None - self.im_custom_mimetype = None - self.im_n_frames = None - self._seq_num = None - self.rewind_state = None - - self.text_memory = 0 - - def check_text_memory(self, chunklen): - self.text_memory += chunklen - if self.text_memory > MAX_TEXT_MEMORY: - msg = ( - "Too much memory used in text chunks: " - f"{self.text_memory}>MAX_TEXT_MEMORY" - ) - raise ValueError(msg) - - def save_rewind(self): - self.rewind_state = { - "info": self.im_info.copy(), - "tile": self.im_tile, - "seq_num": self._seq_num, - } - - def rewind(self): - self.im_info = self.rewind_state["info"] - self.im_tile = self.rewind_state["tile"] - self._seq_num = self.rewind_state["seq_num"] - - def chunk_iCCP(self, pos, length): - # ICC profile - s = ImageFile._safe_read(self.fp, length) - # according to PNG spec, the iCCP chunk contains: - # Profile name 1-79 bytes (character string) - # Null separator 1 byte (null character) - # Compression method 1 byte (0) - # Compressed profile n bytes (zlib with deflate compression) - i = s.find(b"\0") - logger.debug("iCCP profile name %r", s[:i]) - logger.debug("Compression method %s", s[i]) - comp_method = s[i] - if comp_method != 0: - msg = f"Unknown compression method {comp_method} in iCCP chunk" - raise SyntaxError(msg) - try: - icc_profile = _safe_zlib_decompress(s[i + 2 :]) - except ValueError: - if ImageFile.LOAD_TRUNCATED_IMAGES: - icc_profile = None - else: - raise - except zlib.error: - icc_profile = None # FIXME - self.im_info["icc_profile"] = icc_profile - return s - - def chunk_IHDR(self, pos, length): - # image header - s = ImageFile._safe_read(self.fp, length) - if length < 13: - if ImageFile.LOAD_TRUNCATED_IMAGES: - return s - msg = "Truncated IHDR chunk" - raise ValueError(msg) - self.im_size = i32(s, 0), i32(s, 4) - try: - self.im_mode, self.im_rawmode = _MODES[(s[8], s[9])] - except Exception: - pass - if s[12]: - self.im_info["interlace"] = 1 - if s[11]: - msg = "unknown filter category" - raise SyntaxError(msg) - return s - - def chunk_IDAT(self, pos, length): - # image data - if "bbox" in self.im_info: - tile = [("zip", self.im_info["bbox"], pos, self.im_rawmode)] - else: - if self.im_n_frames is not None: - self.im_info["default_image"] = True - tile = [("zip", (0, 0) + self.im_size, pos, self.im_rawmode)] - self.im_tile = tile - self.im_idat = length - raise EOFError - - def chunk_IEND(self, pos, length): - # end of PNG image - raise EOFError - - def chunk_PLTE(self, pos, length): - # palette - s = ImageFile._safe_read(self.fp, length) - if 
self.im_mode == "P": - self.im_palette = "RGB", s - return s - - def chunk_tRNS(self, pos, length): - # transparency - s = ImageFile._safe_read(self.fp, length) - if self.im_mode == "P": - if _simple_palette.match(s): - # tRNS contains only one full-transparent entry, - # other entries are full opaque - i = s.find(b"\0") - if i >= 0: - self.im_info["transparency"] = i - else: - # otherwise, we have a byte string with one alpha value - # for each palette entry - self.im_info["transparency"] = s - elif self.im_mode in ("1", "L", "I"): - self.im_info["transparency"] = i16(s) - elif self.im_mode == "RGB": - self.im_info["transparency"] = i16(s), i16(s, 2), i16(s, 4) - return s - - def chunk_gAMA(self, pos, length): - # gamma setting - s = ImageFile._safe_read(self.fp, length) - self.im_info["gamma"] = i32(s) / 100000.0 - return s - - def chunk_cHRM(self, pos, length): - # chromaticity, 8 unsigned ints, actual value is scaled by 100,000 - # WP x,y, Red x,y, Green x,y Blue x,y - - s = ImageFile._safe_read(self.fp, length) - raw_vals = struct.unpack(">%dI" % (len(s) // 4), s) - self.im_info["chromaticity"] = tuple(elt / 100000.0 for elt in raw_vals) - return s - - def chunk_sRGB(self, pos, length): - # srgb rendering intent, 1 byte - # 0 perceptual - # 1 relative colorimetric - # 2 saturation - # 3 absolute colorimetric - - s = ImageFile._safe_read(self.fp, length) - if length < 1: - if ImageFile.LOAD_TRUNCATED_IMAGES: - return s - msg = "Truncated sRGB chunk" - raise ValueError(msg) - self.im_info["srgb"] = s[0] - return s - - def chunk_pHYs(self, pos, length): - # pixels per unit - s = ImageFile._safe_read(self.fp, length) - if length < 9: - if ImageFile.LOAD_TRUNCATED_IMAGES: - return s - msg = "Truncated pHYs chunk" - raise ValueError(msg) - px, py = i32(s, 0), i32(s, 4) - unit = s[8] - if unit == 1: # meter - dpi = px * 0.0254, py * 0.0254 - self.im_info["dpi"] = dpi - elif unit == 0: - self.im_info["aspect"] = px, py - return s - - def chunk_tEXt(self, pos, length): - # text - s = ImageFile._safe_read(self.fp, length) - try: - k, v = s.split(b"\0", 1) - except ValueError: - # fallback for broken tEXt tags - k = s - v = b"" - if k: - k = k.decode("latin-1", "strict") - v_str = v.decode("latin-1", "replace") - - self.im_info[k] = v if k == "exif" else v_str - self.im_text[k] = v_str - self.check_text_memory(len(v_str)) - - return s - - def chunk_zTXt(self, pos, length): - # compressed text - s = ImageFile._safe_read(self.fp, length) - try: - k, v = s.split(b"\0", 1) - except ValueError: - k = s - v = b"" - if v: - comp_method = v[0] - else: - comp_method = 0 - if comp_method != 0: - msg = f"Unknown compression method {comp_method} in zTXt chunk" - raise SyntaxError(msg) - try: - v = _safe_zlib_decompress(v[1:]) - except ValueError: - if ImageFile.LOAD_TRUNCATED_IMAGES: - v = b"" - else: - raise - except zlib.error: - v = b"" - - if k: - k = k.decode("latin-1", "strict") - v = v.decode("latin-1", "replace") - - self.im_info[k] = self.im_text[k] = v - self.check_text_memory(len(v)) - - return s - - def chunk_iTXt(self, pos, length): - # international text - r = s = ImageFile._safe_read(self.fp, length) - try: - k, r = r.split(b"\0", 1) - except ValueError: - return s - if len(r) < 2: - return s - cf, cm, r = r[0], r[1], r[2:] - try: - lang, tk, v = r.split(b"\0", 2) - except ValueError: - return s - if cf != 0: - if cm == 0: - try: - v = _safe_zlib_decompress(v) - except ValueError: - if ImageFile.LOAD_TRUNCATED_IMAGES: - return s - else: - raise - except zlib.error: - return s - else: - return 
s - try: - k = k.decode("latin-1", "strict") - lang = lang.decode("utf-8", "strict") - tk = tk.decode("utf-8", "strict") - v = v.decode("utf-8", "strict") - except UnicodeError: - return s - - self.im_info[k] = self.im_text[k] = iTXt(v, lang, tk) - self.check_text_memory(len(v)) - - return s - - def chunk_eXIf(self, pos, length): - s = ImageFile._safe_read(self.fp, length) - self.im_info["exif"] = b"Exif\x00\x00" + s - return s - - # APNG chunks - def chunk_acTL(self, pos, length): - s = ImageFile._safe_read(self.fp, length) - if length < 8: - if ImageFile.LOAD_TRUNCATED_IMAGES: - return s - msg = "APNG contains truncated acTL chunk" - raise ValueError(msg) - if self.im_n_frames is not None: - self.im_n_frames = None - warnings.warn("Invalid APNG, will use default PNG image if possible") - return s - n_frames = i32(s) - if n_frames == 0 or n_frames > 0x80000000: - warnings.warn("Invalid APNG, will use default PNG image if possible") - return s - self.im_n_frames = n_frames - self.im_info["loop"] = i32(s, 4) - self.im_custom_mimetype = "image/apng" - return s - - def chunk_fcTL(self, pos, length): - s = ImageFile._safe_read(self.fp, length) - if length < 26: - if ImageFile.LOAD_TRUNCATED_IMAGES: - return s - msg = "APNG contains truncated fcTL chunk" - raise ValueError(msg) - seq = i32(s) - if (self._seq_num is None and seq != 0) or ( - self._seq_num is not None and self._seq_num != seq - 1 - ): - msg = "APNG contains frame sequence errors" - raise SyntaxError(msg) - self._seq_num = seq - width, height = i32(s, 4), i32(s, 8) - px, py = i32(s, 12), i32(s, 16) - im_w, im_h = self.im_size - if px + width > im_w or py + height > im_h: - msg = "APNG contains invalid frames" - raise SyntaxError(msg) - self.im_info["bbox"] = (px, py, px + width, py + height) - delay_num, delay_den = i16(s, 20), i16(s, 22) - if delay_den == 0: - delay_den = 100 - self.im_info["duration"] = float(delay_num) / float(delay_den) * 1000 - self.im_info["disposal"] = s[24] - self.im_info["blend"] = s[25] - return s - - def chunk_fdAT(self, pos, length): - if length < 4: - if ImageFile.LOAD_TRUNCATED_IMAGES: - s = ImageFile._safe_read(self.fp, length) - return s - msg = "APNG contains truncated fDAT chunk" - raise ValueError(msg) - s = ImageFile._safe_read(self.fp, 4) - seq = i32(s) - if self._seq_num != seq - 1: - msg = "APNG contains frame sequence errors" - raise SyntaxError(msg) - self._seq_num = seq - return self.chunk_IDAT(pos + 4, length - 4) - - -# -------------------------------------------------------------------- -# PNG reader - - -def _accept(prefix): - return prefix[:8] == _MAGIC - - -## -# Image plugin for PNG images. - - -class PngImageFile(ImageFile.ImageFile): - format = "PNG" - format_description = "Portable network graphics" - - def _open(self): - if not _accept(self.fp.read(8)): - msg = "not a PNG file" - raise SyntaxError(msg) - self._fp = self.fp - self.__frame = 0 - - # - # Parse headers up to the first IDAT or fDAT chunk - - self.private_chunks = [] - self.png = PngStream(self.fp) - - while True: - # - # get next chunk - - cid, pos, length = self.png.read() - - try: - s = self.png.call(cid, pos, length) - except EOFError: - break - except AttributeError: - logger.debug("%r %s %s (unknown)", cid, pos, length) - s = ImageFile._safe_read(self.fp, length) - if cid[1:2].islower(): - self.private_chunks.append((cid, s)) - - self.png.crc(cid, s) - - # - # Copy relevant attributes from the PngStream. 
An alternative - # would be to let the PngStream class modify these attributes - # directly, but that introduces circular references which are - # difficult to break if things go wrong in the decoder... - # (believe me, I've tried ;-) - - self.mode = self.png.im_mode - self._size = self.png.im_size - self.info = self.png.im_info - self._text = None - self.tile = self.png.im_tile - self.custom_mimetype = self.png.im_custom_mimetype - self.n_frames = self.png.im_n_frames or 1 - self.default_image = self.info.get("default_image", False) - - if self.png.im_palette: - rawmode, data = self.png.im_palette - self.palette = ImagePalette.raw(rawmode, data) - - if cid == b"fdAT": - self.__prepare_idat = length - 4 - else: - self.__prepare_idat = length # used by load_prepare() - - if self.png.im_n_frames is not None: - self._close_exclusive_fp_after_loading = False - self.png.save_rewind() - self.__rewind_idat = self.__prepare_idat - self.__rewind = self._fp.tell() - if self.default_image: - # IDAT chunk contains default image and not first animation frame - self.n_frames += 1 - self._seek(0) - self.is_animated = self.n_frames > 1 - - @property - def text(self): - # experimental - if self._text is None: - # iTxt, tEXt and zTXt chunks may appear at the end of the file - # So load the file to ensure that they are read - if self.is_animated: - frame = self.__frame - # for APNG, seek to the final frame before loading - self.seek(self.n_frames - 1) - self.load() - if self.is_animated: - self.seek(frame) - return self._text - - def verify(self): - """Verify PNG file""" - - if self.fp is None: - msg = "verify must be called directly after open" - raise RuntimeError(msg) - - # back up to beginning of IDAT block - self.fp.seek(self.tile[0][2] - 8) - - self.png.verify() - self.png.close() - - if self._exclusive_fp: - self.fp.close() - self.fp = None - - def seek(self, frame): - if not self._seek_check(frame): - return - if frame < self.__frame: - self._seek(0, True) - - last_frame = self.__frame - for f in range(self.__frame + 1, frame + 1): - try: - self._seek(f) - except EOFError as e: - self.seek(last_frame) - msg = "no more images in APNG file" - raise EOFError(msg) from e - - def _seek(self, frame, rewind=False): - if frame == 0: - if rewind: - self._fp.seek(self.__rewind) - self.png.rewind() - self.__prepare_idat = self.__rewind_idat - self.im = None - if self.pyaccess: - self.pyaccess = None - self.info = self.png.im_info - self.tile = self.png.im_tile - self.fp = self._fp - self._prev_im = None - self.dispose = None - self.default_image = self.info.get("default_image", False) - self.dispose_op = self.info.get("disposal") - self.blend_op = self.info.get("blend") - self.dispose_extent = self.info.get("bbox") - self.__frame = 0 - else: - if frame != self.__frame + 1: - msg = f"cannot seek to frame {frame}" - raise ValueError(msg) - - # ensure previous frame was loaded - self.load() - - if self.dispose: - self.im.paste(self.dispose, self.dispose_extent) - self._prev_im = self.im.copy() - - self.fp = self._fp - - # advance to the next frame - if self.__prepare_idat: - ImageFile._safe_read(self.fp, self.__prepare_idat) - self.__prepare_idat = 0 - frame_start = False - while True: - self.fp.read(4) # CRC - - try: - cid, pos, length = self.png.read() - except (struct.error, SyntaxError): - break - - if cid == b"IEND": - msg = "No more images in APNG file" - raise EOFError(msg) - if cid == b"fcTL": - if frame_start: - # there must be at least one fdAT chunk between fcTL chunks - msg = "APNG missing frame data" 
- raise SyntaxError(msg) - frame_start = True - - try: - self.png.call(cid, pos, length) - except UnicodeDecodeError: - break - except EOFError: - if cid == b"fdAT": - length -= 4 - if frame_start: - self.__prepare_idat = length - break - ImageFile._safe_read(self.fp, length) - except AttributeError: - logger.debug("%r %s %s (unknown)", cid, pos, length) - ImageFile._safe_read(self.fp, length) - - self.__frame = frame - self.tile = self.png.im_tile - self.dispose_op = self.info.get("disposal") - self.blend_op = self.info.get("blend") - self.dispose_extent = self.info.get("bbox") - - if not self.tile: - raise EOFError - - # setup frame disposal (actual disposal done when needed in the next _seek()) - if self._prev_im is None and self.dispose_op == Disposal.OP_PREVIOUS: - self.dispose_op = Disposal.OP_BACKGROUND - - if self.dispose_op == Disposal.OP_PREVIOUS: - self.dispose = self._prev_im.copy() - self.dispose = self._crop(self.dispose, self.dispose_extent) - elif self.dispose_op == Disposal.OP_BACKGROUND: - self.dispose = Image.core.fill(self.mode, self.size) - self.dispose = self._crop(self.dispose, self.dispose_extent) - else: - self.dispose = None - - def tell(self): - return self.__frame - - def load_prepare(self): - """internal: prepare to read PNG file""" - - if self.info.get("interlace"): - self.decoderconfig = self.decoderconfig + (1,) - - self.__idat = self.__prepare_idat # used by load_read() - ImageFile.ImageFile.load_prepare(self) - - def load_read(self, read_bytes): - """internal: read more image data""" - - while self.__idat == 0: - # end of chunk, skip forward to next one - - self.fp.read(4) # CRC - - cid, pos, length = self.png.read() - - if cid not in [b"IDAT", b"DDAT", b"fdAT"]: - self.png.push(cid, pos, length) - return b"" - - if cid == b"fdAT": - try: - self.png.call(cid, pos, length) - except EOFError: - pass - self.__idat = length - 4 # sequence_num has already been read - else: - self.__idat = length # empty chunks are allowed - - # read more data from this chunk - if read_bytes <= 0: - read_bytes = self.__idat - else: - read_bytes = min(read_bytes, self.__idat) - - self.__idat = self.__idat - read_bytes - - return self.fp.read(read_bytes) - - def load_end(self): - """internal: finished reading image data""" - if self.__idat != 0: - self.fp.read(self.__idat) - while True: - self.fp.read(4) # CRC - - try: - cid, pos, length = self.png.read() - except (struct.error, SyntaxError): - break - - if cid == b"IEND": - break - elif cid == b"fcTL" and self.is_animated: - # start of the next frame, stop reading - self.__prepare_idat = 0 - self.png.push(cid, pos, length) - break - - try: - self.png.call(cid, pos, length) - except UnicodeDecodeError: - break - except EOFError: - if cid == b"fdAT": - length -= 4 - ImageFile._safe_read(self.fp, length) - except AttributeError: - logger.debug("%r %s %s (unknown)", cid, pos, length) - s = ImageFile._safe_read(self.fp, length) - if cid[1:2].islower(): - self.private_chunks.append((cid, s, True)) - self._text = self.png.im_text - if not self.is_animated: - self.png.close() - self.png = None - else: - if self._prev_im and self.blend_op == Blend.OP_OVER: - updated = self._crop(self.im, self.dispose_extent) - if self.im.mode == "RGB" and "transparency" in self.info: - mask = updated.convert_transparent( - "RGBA", self.info["transparency"] - ) - else: - mask = updated.convert("RGBA") - self._prev_im.paste(updated, self.dispose_extent, mask) - self.im = self._prev_im - if self.pyaccess: - self.pyaccess = None - - def _getexif(self): - if 
"exif" not in self.info: - self.load() - if "exif" not in self.info and "Raw profile type exif" not in self.info: - return None - return self.getexif()._get_merged_dict() - - def getexif(self): - if "exif" not in self.info: - self.load() - - return super().getexif() - - def getxmp(self): - """ - Returns a dictionary containing the XMP tags. - Requires defusedxml to be installed. - - :returns: XMP tags in a dictionary. - """ - return ( - self._getxmp(self.info["XML:com.adobe.xmp"]) - if "XML:com.adobe.xmp" in self.info - else {} - ) - - -# -------------------------------------------------------------------- -# PNG writer - -_OUTMODES = { - # supported PIL modes, and corresponding rawmodes/bits/color combinations - "1": ("1", b"\x01\x00"), - "L;1": ("L;1", b"\x01\x00"), - "L;2": ("L;2", b"\x02\x00"), - "L;4": ("L;4", b"\x04\x00"), - "L": ("L", b"\x08\x00"), - "LA": ("LA", b"\x08\x04"), - "I": ("I;16B", b"\x10\x00"), - "I;16": ("I;16B", b"\x10\x00"), - "P;1": ("P;1", b"\x01\x03"), - "P;2": ("P;2", b"\x02\x03"), - "P;4": ("P;4", b"\x04\x03"), - "P": ("P", b"\x08\x03"), - "RGB": ("RGB", b"\x08\x02"), - "RGBA": ("RGBA", b"\x08\x06"), -} - - -def putchunk(fp, cid, *data): - """Write a PNG chunk (including CRC field)""" - - data = b"".join(data) - - fp.write(o32(len(data)) + cid) - fp.write(data) - crc = _crc32(data, _crc32(cid)) - fp.write(o32(crc)) - - -class _idat: - # wrap output from the encoder in IDAT chunks - - def __init__(self, fp, chunk): - self.fp = fp - self.chunk = chunk - - def write(self, data): - self.chunk(self.fp, b"IDAT", data) - - -class _fdat: - # wrap encoder output in fdAT chunks - - def __init__(self, fp, chunk, seq_num): - self.fp = fp - self.chunk = chunk - self.seq_num = seq_num - - def write(self, data): - self.chunk(self.fp, b"fdAT", o32(self.seq_num), data) - self.seq_num += 1 - - -def _write_multiple_frames(im, fp, chunk, rawmode, default_image, append_images): - duration = im.encoderinfo.get("duration", im.info.get("duration", 0)) - loop = im.encoderinfo.get("loop", im.info.get("loop", 0)) - disposal = im.encoderinfo.get("disposal", im.info.get("disposal", Disposal.OP_NONE)) - blend = im.encoderinfo.get("blend", im.info.get("blend", Blend.OP_SOURCE)) - - if default_image: - chain = itertools.chain(append_images) - else: - chain = itertools.chain([im], append_images) - - im_frames = [] - frame_count = 0 - for im_seq in chain: - for im_frame in ImageSequence.Iterator(im_seq): - if im_frame.mode == rawmode: - im_frame = im_frame.copy() - else: - if rawmode == "P": - im_frame = im_frame.convert(rawmode, palette=im.palette) - else: - im_frame = im_frame.convert(rawmode) - encoderinfo = im.encoderinfo.copy() - if isinstance(duration, (list, tuple)): - encoderinfo["duration"] = duration[frame_count] - if isinstance(disposal, (list, tuple)): - encoderinfo["disposal"] = disposal[frame_count] - if isinstance(blend, (list, tuple)): - encoderinfo["blend"] = blend[frame_count] - frame_count += 1 - - if im_frames: - previous = im_frames[-1] - prev_disposal = previous["encoderinfo"].get("disposal") - prev_blend = previous["encoderinfo"].get("blend") - if prev_disposal == Disposal.OP_PREVIOUS and len(im_frames) < 2: - prev_disposal = Disposal.OP_BACKGROUND - - if prev_disposal == Disposal.OP_BACKGROUND: - base_im = previous["im"].copy() - dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0)) - bbox = previous["bbox"] - if bbox: - dispose = dispose.crop(bbox) - else: - bbox = (0, 0) + im.size - base_im.paste(dispose, bbox) - elif prev_disposal == Disposal.OP_PREVIOUS: - base_im 
= im_frames[-2]["im"] - else: - base_im = previous["im"] - delta = ImageChops.subtract_modulo( - im_frame.convert("RGBA"), base_im.convert("RGBA") - ) - bbox = delta.getbbox(alpha_only=False) - if ( - not bbox - and prev_disposal == encoderinfo.get("disposal") - and prev_blend == encoderinfo.get("blend") - ): - previous["encoderinfo"]["duration"] += encoderinfo.get( - "duration", duration - ) - continue - else: - bbox = None - if "duration" not in encoderinfo: - encoderinfo["duration"] = duration - im_frames.append({"im": im_frame, "bbox": bbox, "encoderinfo": encoderinfo}) - - # animation control - chunk( - fp, - b"acTL", - o32(len(im_frames)), # 0: num_frames - o32(loop), # 4: num_plays - ) - - # default image IDAT (if it exists) - if default_image: - ImageFile._save(im, _idat(fp, chunk), [("zip", (0, 0) + im.size, 0, rawmode)]) - - seq_num = 0 - for frame, frame_data in enumerate(im_frames): - im_frame = frame_data["im"] - if not frame_data["bbox"]: - bbox = (0, 0) + im_frame.size - else: - bbox = frame_data["bbox"] - im_frame = im_frame.crop(bbox) - size = im_frame.size - encoderinfo = frame_data["encoderinfo"] - frame_duration = int(round(encoderinfo["duration"])) - frame_disposal = encoderinfo.get("disposal", disposal) - frame_blend = encoderinfo.get("blend", blend) - # frame control - chunk( - fp, - b"fcTL", - o32(seq_num), # sequence_number - o32(size[0]), # width - o32(size[1]), # height - o32(bbox[0]), # x_offset - o32(bbox[1]), # y_offset - o16(frame_duration), # delay_numerator - o16(1000), # delay_denominator - o8(frame_disposal), # dispose_op - o8(frame_blend), # blend_op - ) - seq_num += 1 - # frame data - if frame == 0 and not default_image: - # first frame must be in IDAT chunks for backwards compatibility - ImageFile._save( - im_frame, - _idat(fp, chunk), - [("zip", (0, 0) + im_frame.size, 0, rawmode)], - ) - else: - fdat_chunks = _fdat(fp, chunk, seq_num) - ImageFile._save( - im_frame, - fdat_chunks, - [("zip", (0, 0) + im_frame.size, 0, rawmode)], - ) - seq_num = fdat_chunks.seq_num - - -def _save_all(im, fp, filename): - _save(im, fp, filename, save_all=True) - - -def _save(im, fp, filename, chunk=putchunk, save_all=False): - # save an image to disk (called by the save method) - - if save_all: - default_image = im.encoderinfo.get( - "default_image", im.info.get("default_image") - ) - modes = set() - append_images = im.encoderinfo.get("append_images", []) - if default_image: - chain = itertools.chain(append_images) - else: - chain = itertools.chain([im], append_images) - for im_seq in chain: - for im_frame in ImageSequence.Iterator(im_seq): - modes.add(im_frame.mode) - for mode in ("RGBA", "RGB", "P"): - if mode in modes: - break - else: - mode = modes.pop() - else: - mode = im.mode - - if mode == "P": - # - # attempt to minimize storage requirements for palette images - if "bits" in im.encoderinfo: - # number of bits specified by user - colors = min(1 << im.encoderinfo["bits"], 256) - else: - # check palette contents - if im.palette: - colors = max(min(len(im.palette.getdata()[1]) // 3, 256), 1) - else: - colors = 256 - - if colors <= 16: - if colors <= 2: - bits = 1 - elif colors <= 4: - bits = 2 - else: - bits = 4 - mode = f"{mode};{bits}" - - # encoder options - im.encoderconfig = ( - im.encoderinfo.get("optimize", False), - im.encoderinfo.get("compress_level", -1), - im.encoderinfo.get("compress_type", -1), - im.encoderinfo.get("dictionary", b""), - ) - - # get the corresponding PNG mode - try: - rawmode, mode = _OUTMODES[mode] - except KeyError as e: - msg = 
f"cannot write mode {mode} as PNG" - raise OSError(msg) from e - - # - # write minimal PNG file - - fp.write(_MAGIC) - - chunk( - fp, - b"IHDR", - o32(im.size[0]), # 0: size - o32(im.size[1]), - mode, # 8: depth/type - b"\0", # 10: compression - b"\0", # 11: filter category - b"\0", # 12: interlace flag - ) - - chunks = [b"cHRM", b"gAMA", b"sBIT", b"sRGB", b"tIME"] - - icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile")) - if icc: - # ICC profile - # according to PNG spec, the iCCP chunk contains: - # Profile name 1-79 bytes (character string) - # Null separator 1 byte (null character) - # Compression method 1 byte (0) - # Compressed profile n bytes (zlib with deflate compression) - name = b"ICC Profile" - data = name + b"\0\0" + zlib.compress(icc) - chunk(fp, b"iCCP", data) - - # You must either have sRGB or iCCP. - # Disallow sRGB chunks when an iCCP-chunk has been emitted. - chunks.remove(b"sRGB") - - info = im.encoderinfo.get("pnginfo") - if info: - chunks_multiple_allowed = [b"sPLT", b"iTXt", b"tEXt", b"zTXt"] - for info_chunk in info.chunks: - cid, data = info_chunk[:2] - if cid in chunks: - chunks.remove(cid) - chunk(fp, cid, data) - elif cid in chunks_multiple_allowed: - chunk(fp, cid, data) - elif cid[1:2].islower(): - # Private chunk - after_idat = info_chunk[2:3] - if not after_idat: - chunk(fp, cid, data) - - if im.mode == "P": - palette_byte_number = colors * 3 - palette_bytes = im.im.getpalette("RGB")[:palette_byte_number] - while len(palette_bytes) < palette_byte_number: - palette_bytes += b"\0" - chunk(fp, b"PLTE", palette_bytes) - - transparency = im.encoderinfo.get("transparency", im.info.get("transparency", None)) - - if transparency or transparency == 0: - if im.mode == "P": - # limit to actual palette size - alpha_bytes = colors - if isinstance(transparency, bytes): - chunk(fp, b"tRNS", transparency[:alpha_bytes]) - else: - transparency = max(0, min(255, transparency)) - alpha = b"\xFF" * transparency + b"\0" - chunk(fp, b"tRNS", alpha[:alpha_bytes]) - elif im.mode in ("1", "L", "I"): - transparency = max(0, min(65535, transparency)) - chunk(fp, b"tRNS", o16(transparency)) - elif im.mode == "RGB": - red, green, blue = transparency - chunk(fp, b"tRNS", o16(red) + o16(green) + o16(blue)) - else: - if "transparency" in im.encoderinfo: - # don't bother with transparency if it's an RGBA - # and it's in the info dict. It's probably just stale. 
- msg = "cannot use transparency for this mode" - raise OSError(msg) - else: - if im.mode == "P" and im.im.getpalettemode() == "RGBA": - alpha = im.im.getpalette("RGBA", "A") - alpha_bytes = colors - chunk(fp, b"tRNS", alpha[:alpha_bytes]) - - dpi = im.encoderinfo.get("dpi") - if dpi: - chunk( - fp, - b"pHYs", - o32(int(dpi[0] / 0.0254 + 0.5)), - o32(int(dpi[1] / 0.0254 + 0.5)), - b"\x01", - ) - - if info: - chunks = [b"bKGD", b"hIST"] - for info_chunk in info.chunks: - cid, data = info_chunk[:2] - if cid in chunks: - chunks.remove(cid) - chunk(fp, cid, data) - - exif = im.encoderinfo.get("exif") - if exif: - if isinstance(exif, Image.Exif): - exif = exif.tobytes(8) - if exif.startswith(b"Exif\x00\x00"): - exif = exif[6:] - chunk(fp, b"eXIf", exif) - - if save_all: - _write_multiple_frames(im, fp, chunk, rawmode, default_image, append_images) - else: - ImageFile._save(im, _idat(fp, chunk), [("zip", (0, 0) + im.size, 0, rawmode)]) - - if info: - for info_chunk in info.chunks: - cid, data = info_chunk[:2] - if cid[1:2].islower(): - # Private chunk - after_idat = info_chunk[2:3] - if after_idat: - chunk(fp, cid, data) - - chunk(fp, b"IEND", b"") - - if hasattr(fp, "flush"): - fp.flush() - - -# -------------------------------------------------------------------- -# PNG chunk converter - - -def getchunks(im, **params): - """Return a list of PNG chunks representing this image.""" - - class collector: - data = [] - - def write(self, data): - pass - - def append(self, chunk): - self.data.append(chunk) - - def append(fp, cid, *data): - data = b"".join(data) - crc = o32(_crc32(data, _crc32(cid))) - fp.append((cid, data, crc)) - - fp = collector() - - try: - im.encoderinfo = params - _save(im, fp, None, append) - finally: - del im.encoderinfo - - return fp.data - - -# -------------------------------------------------------------------- -# Registry - -Image.register_open(PngImageFile.format, PngImageFile, _accept) -Image.register_save(PngImageFile.format, _save) -Image.register_save_all(PngImageFile.format, _save_all) - -Image.register_extensions(PngImageFile.format, [".png", ".apng"]) - -Image.register_mime(PngImageFile.format, "image/png") diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cymem/tests/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cymem/tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/otlLib/maxContextCalc.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/otlLib/maxContextCalc.py deleted file mode 100644 index 03e7561b60f126bc19ff8b49ed2ebe7d6898286e..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/otlLib/maxContextCalc.py +++ /dev/null @@ -1,96 +0,0 @@ -__all__ = ["maxCtxFont"] - - -def maxCtxFont(font): - """Calculate the usMaxContext value for an entire font.""" - - maxCtx = 0 - for tag in ("GSUB", "GPOS"): - if tag not in font: - continue - table = font[tag].table - if not table.LookupList: - continue - for lookup in table.LookupList.Lookup: - for st in lookup.SubTable: - maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st) - return maxCtx - - -def maxCtxSubtable(maxCtx, tag, lookupType, st): - """Calculate usMaxContext based on a single lookup table (and an existing - max value). 
- """ - - # single positioning, single / multiple substitution - if (tag == "GPOS" and lookupType == 1) or ( - tag == "GSUB" and lookupType in (1, 2, 3) - ): - maxCtx = max(maxCtx, 1) - - # pair positioning - elif tag == "GPOS" and lookupType == 2: - maxCtx = max(maxCtx, 2) - - # ligatures - elif tag == "GSUB" and lookupType == 4: - for ligatures in st.ligatures.values(): - for ligature in ligatures: - maxCtx = max(maxCtx, ligature.CompCount) - - # context - elif (tag == "GPOS" and lookupType == 7) or (tag == "GSUB" and lookupType == 5): - maxCtx = maxCtxContextualSubtable(maxCtx, st, "Pos" if tag == "GPOS" else "Sub") - - # chained context - elif (tag == "GPOS" and lookupType == 8) or (tag == "GSUB" and lookupType == 6): - maxCtx = maxCtxContextualSubtable( - maxCtx, st, "Pos" if tag == "GPOS" else "Sub", "Chain" - ) - - # extensions - elif (tag == "GPOS" and lookupType == 9) or (tag == "GSUB" and lookupType == 7): - maxCtx = maxCtxSubtable(maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable) - - # reverse-chained context - elif tag == "GSUB" and lookupType == 8: - maxCtx = maxCtxContextualRule(maxCtx, st, "Reverse") - - return maxCtx - - -def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=""): - """Calculate usMaxContext based on a contextual feature subtable.""" - - if st.Format == 1: - for ruleset in getattr(st, "%s%sRuleSet" % (chain, ruleType)): - if ruleset is None: - continue - for rule in getattr(ruleset, "%s%sRule" % (chain, ruleType)): - if rule is None: - continue - maxCtx = maxCtxContextualRule(maxCtx, rule, chain) - - elif st.Format == 2: - for ruleset in getattr(st, "%s%sClassSet" % (chain, ruleType)): - if ruleset is None: - continue - for rule in getattr(ruleset, "%s%sClassRule" % (chain, ruleType)): - if rule is None: - continue - maxCtx = maxCtxContextualRule(maxCtx, rule, chain) - - elif st.Format == 3: - maxCtx = maxCtxContextualRule(maxCtx, st, chain) - - return maxCtx - - -def maxCtxContextualRule(maxCtx, st, chain): - """Calculate usMaxContext based on a contextual feature rule.""" - - if not chain: - return max(maxCtx, st.GlyphCount) - elif chain == "Reverse": - return max(maxCtx, st.GlyphCount + st.LookAheadGlyphCount) - return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/B_A_S_E_.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/B_A_S_E_.py deleted file mode 100644 index f468a963a1e2a8d503b57f4d7aeff12b8770cc67..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/B_A_S_E_.py +++ /dev/null @@ -1,5 +0,0 @@ -from .otBase import BaseTTXConverter - - -class table_B_A_S_E_(BaseTTXConverter): - pass diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_l_c_a_r.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_l_c_a_r.py deleted file mode 100644 index 1323b670d0c2e7a51e553ee8aa341af789898b1d..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_l_c_a_r.py +++ /dev/null @@ -1,5 +0,0 @@ -from .otBase import BaseTTXConverter - - -class table__l_c_a_r(BaseTTXConverter): - pass diff --git a/spaces/cihyFjudo/fairness-paper-search/Dynasty Warriors Gundam Pc Game Free 13 Bollywood Scans Game [PORTABLE].md 
b/spaces/cihyFjudo/fairness-paper-search/Dynasty Warriors Gundam Pc Game Free 13 Bollywood Scans Game [PORTABLE].md deleted file mode 100644 index 0cf5fcdeba6c5bffa2c978d04cff10730431046e..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Dynasty Warriors Gundam Pc Game Free 13 Bollywood Scans Game [PORTABLE].md +++ /dev/null @@ -1,6 +0,0 @@ -

    Dynasty Warriors Gundam Pc Game Free 13 bollywood scans game


    DOWNLOAD ✏ ✏ ✏ https://tinurli.com/2uwklZ



    - - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fastapi/openapi/docs.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fastapi/openapi/docs.py deleted file mode 100644 index 81f67dcc5bf59d32c7c8e59d5f345002d114a9ef..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fastapi/openapi/docs.py +++ /dev/null @@ -1,203 +0,0 @@ -import json -from typing import Any, Dict, Optional - -from fastapi.encoders import jsonable_encoder -from starlette.responses import HTMLResponse - -swagger_ui_default_parameters = { - "dom_id": "#swagger-ui", - "layout": "BaseLayout", - "deepLinking": True, - "showExtensions": True, - "showCommonExtensions": True, -} - - -def get_swagger_ui_html( - *, - openapi_url: str, - title: str, - swagger_js_url: str = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui-bundle.js", - swagger_css_url: str = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui.css", - swagger_favicon_url: str = "https://fastapi.tiangolo.com/img/favicon.png", - oauth2_redirect_url: Optional[str] = None, - init_oauth: Optional[Dict[str, Any]] = None, - swagger_ui_parameters: Optional[Dict[str, Any]] = None, -) -> HTMLResponse: - current_swagger_ui_parameters = swagger_ui_default_parameters.copy() - if swagger_ui_parameters: - current_swagger_ui_parameters.update(swagger_ui_parameters) - - html = f""" - - - - - - {title} - - -
    -
    - - - - - - """ - return HTMLResponse(html) - - -def get_redoc_html( - *, - openapi_url: str, - title: str, - redoc_js_url: str = "https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js", - redoc_favicon_url: str = "https://fastapi.tiangolo.com/img/favicon.png", - with_google_fonts: bool = True, -) -> HTMLResponse: - html = f""" - - - - {title} - - - - """ - if with_google_fonts: - html += """ - - """ - html += f""" - - - - - - - - - - - """ - return HTMLResponse(html) - - -def get_swagger_ui_oauth2_redirect_html() -> HTMLResponse: - # copied from https://github.com/swagger-api/swagger-ui/blob/v4.14.0/dist/oauth2-redirect.html - html = """ - - - - Swagger UI: OAuth2 Redirect - - - - - - """ - return HTMLResponse(content=html) diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/_o_p_b_d.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/_o_p_b_d.py deleted file mode 100644 index b22af216bb2e2ddb8af1cd3f991d4ede69471076..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/_o_p_b_d.py +++ /dev/null @@ -1,6 +0,0 @@ -from .otBase import BaseTTXConverter - - -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html -class table__o_p_b_d(BaseTTXConverter): - pass diff --git a/spaces/cncanon/locusts/greeting.md b/spaces/cncanon/locusts/greeting.md deleted file mode 100644 index 2197a0e80139e08425edf4f01ff9fc862dff6d9a..0000000000000000000000000000000000000000 --- a/spaces/cncanon/locusts/greeting.md +++ /dev/null @@ -1,2 +0,0 @@ -![](https://static.wikia.nocookie.net/brotherhood-of-nod/images/a/a3/CNCTW_Kane.png) -Pass: `kane_lives!` \ No newline at end of file diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/avs3_parser.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/avs3_parser.c deleted file mode 100644 index a819b5783d63ed38eb5a93f085ca6285f4dcd693..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/avs3_parser.c +++ /dev/null @@ -1,179 +0,0 @@ -/* - * AVS3-P2/IEEE1857.10 video parser. - * Copyright (c) 2020 Zhenyu Wang - * Bingjie Han - * Huiwen Ren - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "avs3.h" -#include "get_bits.h" -#include "parser.h" - -static int avs3_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) -{ - int pic_found = pc->frame_start_found; - uint32_t state = pc->state; - int cur = 0; - - if (!pic_found) { - for (; cur < buf_size; ++cur) { - state = (state << 8) | buf[cur]; - if (AVS3_ISPIC(buf[cur])){ - cur++; - pic_found = 1; - break; - } - } - } - - if (pic_found) { - if (!buf_size) - return END_NOT_FOUND; - for (; cur < buf_size; ++cur) { - state = (state << 8) | buf[cur]; - if ((state & 0xFFFFFF00) == 0x100 && AVS3_ISUNIT(state & 0xFF)) { - pc->frame_start_found = 0; - pc->state = -1; - return cur - 3; - } - } - } - - pc->frame_start_found = pic_found; - pc->state = state; - - return END_NOT_FOUND; -} - -static void parse_avs3_nal_units(AVCodecParserContext *s, const uint8_t *buf, - int buf_size, AVCodecContext *avctx) -{ - if (buf_size < 5) { - return; - } - - if (buf[0] == 0x0 && buf[1] == 0x0 && buf[2] == 0x1) { - if (buf[3] == AVS3_SEQ_START_CODE) { - GetBitContext gb; - int profile, ratecode, low_delay; - - init_get_bits8(&gb, buf + 4, buf_size - 4); - - s->key_frame = 1; - s->pict_type = AV_PICTURE_TYPE_I; - - profile = get_bits(&gb, 8); - // Skip bits: level(8) - // progressive(1) - // field(1) - // library(2) - // resv(1) - // width(14) - // resv(1) - // height(14) - // chroma(2) - // sampe_precision(3) - skip_bits(&gb, 47); - - if (profile == AVS3_PROFILE_BASELINE_MAIN10) { - int sample_precision = get_bits(&gb, 3); - if (sample_precision == 1) { - avctx->pix_fmt = AV_PIX_FMT_YUV420P; - } else if (sample_precision == 2) { - avctx->pix_fmt = AV_PIX_FMT_YUV420P10LE; - } else { - avctx->pix_fmt = AV_PIX_FMT_NONE; - } - } - - // Skip bits: resv(1) - // aspect(4) - skip_bits(&gb, 5); - - ratecode = get_bits(&gb, 4); - - // Skip bits: resv(1) - // bitrate_low(18) - // resv(1) - // bitrate_high(12) - skip_bits(&gb, 32); - - low_delay = get_bits(&gb, 1); - avctx->has_b_frames = FFMAX(avctx->has_b_frames, !low_delay); - - avctx->framerate.num = ff_avs3_frame_rate_tab[ratecode].num; - avctx->framerate.den = ff_avs3_frame_rate_tab[ratecode].den; - - s->width = s->coded_width = avctx->width; - s->height = s->coded_height = avctx->height; - - av_log(avctx, AV_LOG_DEBUG, - "AVS3 parse seq HDR: profile %d; coded size: %dx%d; frame rate code: %d\n", - profile, avctx->width, avctx->height, ratecode); - - } else if (buf[3] == AVS3_INTRA_PIC_START_CODE) { - s->key_frame = 1; - s->pict_type = AV_PICTURE_TYPE_I; - } else if (buf[3] == AVS3_INTER_PIC_START_CODE){ - s->key_frame = 0; - if (buf_size > 9) { - int pic_code_type = buf[8] & 0x3; - if (pic_code_type == 1 || pic_code_type == 3) { - s->pict_type = AV_PICTURE_TYPE_P; - } else { - s->pict_type = AV_PICTURE_TYPE_B; - } - } - } - } -} - - -static int avs3_parse(AVCodecParserContext *s, AVCodecContext *avctx, - const uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size) -{ - ParseContext *pc = s->priv_data; - int next; - - if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) { - next = buf_size; - } else { - next = avs3_find_frame_end(pc, buf, buf_size); - if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) { - *poutbuf = NULL; - *poutbuf_size = 0; - return buf_size; - } - } - - parse_avs3_nal_units(s, buf, buf_size, avctx); - - *poutbuf = buf; - 
*poutbuf_size = buf_size; - - return next; -} - -const AVCodecParser ff_avs3_parser = { - .codec_ids = { AV_CODEC_ID_AVS3 }, - .priv_data_size = sizeof(ParseContext), - .parser_parse = avs3_parse, - .parser_close = ff_parse_close, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/avuidec.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/avuidec.c deleted file mode 100644 index ba157e167cd43d992de82c96208b63efee857cde..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/avuidec.c +++ /dev/null @@ -1,130 +0,0 @@ -/* - * AVID Meridien decoder - * - * Copyright (c) 2012 Carl Eugen Hoyos - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "avcodec.h" -#include "codec_internal.h" -#include "decode.h" -#include "libavutil/intreadwrite.h" - -static av_cold int avui_decode_init(AVCodecContext *avctx) -{ - avctx->pix_fmt = AV_PIX_FMT_YUVA422P; - return 0; -} - -static int avui_decode_frame(AVCodecContext *avctx, AVFrame *pic, - int *got_frame, AVPacket *avpkt) -{ - int ret; - const uint8_t *src = avpkt->data, *extradata = avctx->extradata; - const uint8_t *srca; - uint8_t *y, *u, *v, *a; - int transparent, interlaced = 1, skip, opaque_length, i, j, k; - uint32_t extradata_size = avctx->extradata_size; - - while (extradata_size >= 24) { - uint32_t atom_size = AV_RB32(extradata); - if (!memcmp(&extradata[4], "APRGAPRG0001", 12)) { - interlaced = extradata[19] != 1; - break; - } - if (atom_size && atom_size <= extradata_size) { - extradata += atom_size; - extradata_size -= atom_size; - } else { - break; - } - } - if (avctx->height == 486) { - skip = 10; - } else { - skip = 16; - } - opaque_length = 2 * avctx->width * (avctx->height + skip) + 4 * interlaced; - if (avpkt->size < opaque_length) { - av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n"); - return AVERROR(EINVAL); - } - transparent = avctx->bits_per_coded_sample == 32 && - avpkt->size >= opaque_length * 2 + 4; - srca = src + opaque_length + 5; - - if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) - return ret; - - pic->key_frame = 1; - pic->pict_type = AV_PICTURE_TYPE_I; - - if (!interlaced) { - src += avctx->width * skip; - srca += avctx->width * skip; - } - - for (i = 0; i < interlaced + 1; i++) { - src += avctx->width * skip; - srca += avctx->width * skip; - if (interlaced && avctx->height == 486) { - y = pic->data[0] + (1 - i) * pic->linesize[0]; - u = pic->data[1] + (1 - i) * pic->linesize[1]; - v = pic->data[2] + (1 - i) * pic->linesize[2]; - a = pic->data[3] + (1 - i) * pic->linesize[3]; - } else { - y = pic->data[0] + i * pic->linesize[0]; - u = pic->data[1] + i * pic->linesize[1]; - v = pic->data[2] + i * pic->linesize[2]; - a = pic->data[3] + i * pic->linesize[3]; - } - - for (j = 0; j < avctx->height >> interlaced; j++) { - 
for (k = 0; k < avctx->width >> 1; k++) { - u[ k ] = *src++; - y[2 * k ] = *src++; - a[2 * k ] = 0xFF - (transparent ? *srca++ : 0); - srca++; - v[ k ] = *src++; - y[2 * k + 1] = *src++; - a[2 * k + 1] = 0xFF - (transparent ? *srca++ : 0); - srca++; - } - - y += (interlaced + 1) * pic->linesize[0]; - u += (interlaced + 1) * pic->linesize[1]; - v += (interlaced + 1) * pic->linesize[2]; - a += (interlaced + 1) * pic->linesize[3]; - } - src += 4; - srca += 4; - } - *got_frame = 1; - - return avpkt->size; -} - -const FFCodec ff_avui_decoder = { - .p.name = "avui", - CODEC_LONG_NAME("Avid Meridien Uncompressed"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_AVUI, - .p.capabilities = AV_CODEC_CAP_DR1, - .init = avui_decode_init, - FF_CODEC_DECODE_CB(avui_decode_frame), -}; diff --git a/spaces/congsaPfin/Manga-OCR/logs/Baixe Clash Royale APK MOD e tenha dinheiro infinito em 2022.md b/spaces/congsaPfin/Manga-OCR/logs/Baixe Clash Royale APK MOD e tenha dinheiro infinito em 2022.md deleted file mode 100644 index 6a43a48afbb9fb54b2e802384486529b89ba50e6..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Baixe Clash Royale APK MOD e tenha dinheiro infinito em 2022.md +++ /dev/null @@ -1,129 +0,0 @@ -
    -

    Clash Royale APK Mod Dinheiro Infinito 2022: How to Download and Play

    -

    Are you a fan of strategy games? Do you want to enjoy unlimited gold, gems, and cards in Clash Royale? If yes, then you might be interested in Clash Royale APK Mod Dinheiro Infinito 2022. This is a modified version of the original game that gives you access to unlimited resources and features. In this article, we will tell you what Clash Royale is, what the mod apk is, how to download and install it, and how to play it. Let's get started!

    -

    clash royale apk mod dinheiro infinito 2022


    DOWNLOAD ✒ ✒ ✒ https://urlca.com/2uO8qF



    -

    What is Clash Royale?

    -

    Clash Royale is a popular strategy game by Supercell, the same company that created Clash of Clans, Brawl Stars, and Hay Day. It was released in 2016 and has since become one of the most downloaded and played games on mobile devices. It has over 100 million downloads on Google Play Store and has a rating of 4.3 out of 5 stars.

    -

    A popular strategy game by Supercell

    -

    Clash Royale is a multiplayer online battle arena (MOBA) game that combines elements of card games, tower defense, and real-time strategy. The game pits two players against each other in a 1v1 or 2v2 mode, where they have to destroy the enemy's towers and king tower while defending their own. The game is set in the same universe as Clash of Clans, featuring characters, troops, spells, and buildings from the original game.

    -

    The gameplay and features of Clash Royale

    -

    The gameplay of Clash Royale is simple but addictive. Each player has a deck of eight cards that they can use to deploy troops, spells, or buildings on the battlefield. Each card has a certain elixir cost, which is replenished over time. The player has to balance their elixir usage and card selection to gain an advantage over their opponent. The game also features chests, arenas, leagues, clans, tournaments, events, quests, and more.

    -

    clash royale mod apk unlimited money and gems 2022
    -clash royale hack apk download 2022 android
    -clash royale mod apk latest version 2022
    -clash royale apk mod tudo infinito 2022
    -clash royale mod apk private server 2022
    -clash royale hack apk mediafıre 2022
    -clash royale mod apk offline 2022
    -clash royale apk mod menu 2022
    -clash royale hack apk ios 2022
    -clash royale mod apk free download 2022
    -clash royale apk mod atualizado 2022
    -clash royale hack apk unlimited everything 2022
    -clash royale mod apk all cards unlocked 2022
    -clash royale apk mod dinheiro e gemas infinitas 2022
    -clash royale hack apk no root 2022
    -clash royale mod apk new update 2022
    -clash royale apk mod online 2022
    -clash royale hack apk mega knight 2022
    -clash royale mod apk unlimited troops 2022
    -clash royale apk mod com skins 2022
    -clash royale hack apk sin verificacion humana 2022
    -clash royale mod apk no ban 2022
    -clash royale apk mod original 2022
    -clash royale hack apk link direto 2022
    -clash royale mod apk unlimited elixir 2022
    -clash royale apk mod com pass Royale gratis 2022
    -clash royale hack apk sin root 2022
    -clash royale mod apk anti ban 2022
    -clash royale apk mod com todas as cartas liberadas 2022
    -clash royale hack apk servidor privado 2022
    -clash royale mod apk with unlimited chest 2022
    -clash royale apk mod com batalha de clãs 2022
    -clash royale hack apk sin conexion a internet 2022
    -clash royale mod apk working 2022
    -clash royale apk mod com novas cartas 2022
    -clash royale hack apk sin baneo 2022
    -clash royale mod apk unlimited gems and coins 2022
    -clash royale apk mod com modo espectador 2022
    -clash royale hack apk ultima version 2022
    -clash royale mod apk god mode 2022
    -clash royale apk mod com desafios infinitos 2022
    -clash royale hack apk sin contraseña 2022
    -clash royale mod apk with real players 2022
    -clash royale apk mod com torneios infinitos 2022
    -clash royale hack apk sin anuncios 2022
    -clash royale mod apk with legendary cards 2022
    -clash royale apk mod com gemas infinitas e ouro infinito e elixir infinito e pass Royale gratis e todas as cartas liberadas e modo espectador e batalha de clãs e desafios infinitos e torneios infinitos e skins e novas cartas e servidor privado e offline e online e anti ban e no root e mega knight e god mode e unlimited everything e link direto e mediafıre e sin verificacion humana e sin conexion a internet e sin baneo e sin contraseña e sin anuncios e ultima version e atualizado e original e working and free download and ios and android and private server and latest version and no ban and menu and all cards unlocked and new update and unlimited troops and chest and elixir and money and gems and coins and skins and pass Royale and clan wars and spectator mode and challenges and tournaments and legendary cards and real players and hack and mod and tudo infinito and dinheiro infinito and gemas infinitas and ouro infinito and elixir infinito in the year of our lord two thousand twenty two amen 🙏🏻🙏🏻🙏🏻

    -

    What is Clash Royale APK Mod Dinheiro Infinito 2022?

    -

    Clash Royale APK Mod Dinheiro Infinito 2022 is a modified version of the original game that gives you access to unlimited resources and features. It is also known as Clash Royale Hack or Clash Royale Cheat. It is not an official version of the game and is not endorsed by Supercell.

    -

    A modified version of the original game

    -

    The mod apk is created by third-party developers who modify the original game files to unlock some features that are otherwise restricted or paid. For example, the mod apk gives you unlimited gold, gems, and cards, which are the main currencies in the game. You can use them to upgrade your cards, unlock new cards, buy chests, enter tournaments, and more. You can also access all the new cards that are released in the game without waiting for them to be available in your region.

    -

    The benefits and risks of using the mod apk

    -

    The main benefit of using the mod apk is that you can enjoy the game without any limitations or costs. You can have fun with your friends and family without worrying about running out of resources or losing battles. You can also experiment with different decks and strategies without risking your trophies or rank.

    The main risk of using the mod apk is that it can be detected and banned by Supercell. The mod apk is not compatible with the official game servers and can cause errors, crashes, or glitches. You might also lose your progress, data, or account if you use the mod apk. Moreover, the mod apk can expose your device to malware, viruses, or hackers who can steal your personal information or damage your device. Therefore, you should use the mod apk at your own risk and discretion.

    -

    How to download and install Clash Royale APK Mod Dinheiro Infinito 2022?

    -

    If you want to try the mod apk, you will need to download and install it on your device. The process is different for Android and PC devices, so we will explain both of them in detail.

    -

    The steps to follow for Android devices

    -

    For Android devices, you will need to follow these steps:

    -
      -
    1. Go to the website where you can download the mod apk file. You can search for it on Google or use this link: .
    2. -
    3. Click on the download button and wait for the file to be downloaded on your device.
    4. -
    5. Go to your device settings and enable the option to install apps from unknown sources. This will allow you to install the mod apk file.
    6. -
    7. Go to your file manager and locate the mod apk file. Tap on it and follow the instructions to install it.
    8. -
    9. Once the installation is complete, you can launch the game and enjoy the mod features.
    10. -
    -

    The steps to follow for PC devices using Bluestacks

    -

    For PC devices, you will need to use an Android emulator like Bluestacks to run the mod apk file. Bluestacks is software that allows you to play Android games and apps on your PC. You can download it from here: . After downloading and installing Bluestacks, you will need to follow these steps:

    -
      -
    1. Go to the website where you can download the mod apk file. You can search for it on Google or use this link: .
    2. -
    3. Click on the download button and wait for the file to be downloaded on your PC.
    4. -
    5. Open Bluestacks and go to the My Apps tab. Click on the Install APK button at the bottom right corner.
    6. -
    7. Select the mod apk file from your PC and click on Open. Bluestacks will install the mod apk file on its emulator.
    8. -
    9. Once the installation is complete, you can launch the game and enjoy the mod features.
    10. -
    -

    How to play Clash Royale APK Mod Dinheiro Infinito 2022?

    -

    Now that you have downloaded and installed the mod apk file, you might be wondering how to play it. The gameplay of the mod apk is similar to the original game, but with some differences. Here are some tips and tricks for beginners and experts alike.

    -

    The basic tips and tricks for beginners

    -

    If you are new to Clash Royale, here are some basic tips and tricks that will help you get started:

    -
      -
    • Learn the basics of the game by completing the tutorial and watching some videos online.
    • -
    • Build a balanced deck of cards that can counter different types of enemies and situations.
    • -
    • Use your elixir wisely and don't spam cards without a plan.
    • -
    • Try to gain an elixir advantage over your opponent by making positive trades or using cheap cards.
    • -
    • Protect your towers and king tower from enemy attacks and try to destroy theirs.
    • -
    • Use spells and buildings wisely and don't waste them on low-value targets.
    • -
    • Join a clan and chat with other players, request cards, donate cards, and participate in clan wars.
    • -
    • Have fun and don't get frustrated by losses or bad luck.
    • -
    -

    The advanced strategies and tactics for experts

    -

    If you are an expert in Clash Royale, here are some advanced strategies and tactics that will help you improve your skills:

    -
      -
    • Analyze your opponent's deck and play style and adapt your strategy accordingly.
    • -
    • Predict your opponent's moves and cards and surprise them with unexpected moves and cards.
    • -
    • Cycle your cards faster than your opponent by using low-cost cards or cycling cards.
    • -
    • Create pressure on both lanes by splitting your push or using dual-lane cards.
    • -
    • Use synergies between your cards to create powerful combos or counter-combos.
    • -
    • Use bait cards to lure out your opponent's counters or spells and then punish them with your main cards.
    • -
    • Use advanced techniques such as kiting, pig pushing, spell cycling, or tower trading to gain an edge over your opponent.
    • -
    • Watch replays of your own and other players' battles and learn from your mistakes and successes.
    • -
    • Keep up with the latest meta and trends and update your deck accordingly.
    • -
    • Practice with your friends, clanmates, or in friendly battles to hone your skills and test your deck.
    • -
    -

    Conclusion

    -

    Clash Royale APK Mod Dinheiro Infinito 2022 is a modified version of the original game that gives you access to unlimited resources and features. It can be fun and exciting to play, but it also comes with some risks and challenges. You will need to download and install the mod apk file on your device, either Android or PC, and follow the instructions to play it. You will also need to learn the basics and advanced strategies of the game to win battles and enjoy the game. We hope this article has helped you understand what Clash Royale APK Mod Dinheiro Infinito 2022 is, how to download and install it, and how to play it. If you have any questions or feedback, please let us know in the comments below. Thank you for reading and happy gaming!

    -

    FAQs

    -

    Here are some frequently asked questions about Clash Royale APK Mod Dinheiro Infinito 2022:

    -
      -
    1. Is Clash Royale APK Mod Dinheiro Infinito 2022 safe to use?
    2. -

      Clash Royale APK Mod Dinheiro Infinito 2022 is not an official version of the game and is not endorsed by Supercell. It can be detected and banned by Supercell, and it can expose your device to malware, viruses, or hackers. Therefore, you should use it at your own risk and discretion.

      -
    3. Can I play Clash Royale APK Mod Dinheiro Infinito 2022 with my friends?
    4. -

      Yes, you can play Clash Royale APK Mod Dinheiro Infinito 2022 with your friends who also have the mod apk installed on their devices. However, you cannot play with players who have the original game installed, as the mod apk is not compatible with the official game servers.

      -
    5. Can I update Clash Royale APK Mod Dinheiro Infinito 2022?
    6. -

      No, you cannot update Clash Royale APK Mod Dinheiro Infinito 2022 from the Google Play Store or the App Store, as it is not an official version of the game. You will need to download and install the latest version of the mod apk file from a reliable website whenever there is a new update available.

      -
    7. Can I restore my progress or data if I delete Clash Royale APK Mod Dinheiro Infinito 2022?
    8. -

      No, you cannot restore your progress or data if you delete Clash Royale APK Mod Dinheiro Infinito 2022 from your device. The mod apk does not sync with your Google Play or Game Center account, and it does not save your data on the cloud. Therefore, you will lose all your progress and data if you uninstall the mod apk.

      -
    9. Can I switch between Clash Royale APK Mod Dinheiro Infinito 2022 and the original game?
    10. -

      No, you cannot switch between Clash Royale APK Mod Dinheiro Infinito 2022 and the original game on the same device. The mod apk will overwrite the original game files on your device, and you will not be able to run both versions simultaneously. If you want to switch between them, you will need to uninstall one version and install another version every time.

      -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download High-Quality 3D Textures for Free - Seamless PBR and More.md b/spaces/congsaPfin/Manga-OCR/logs/Download High-Quality 3D Textures for Free - Seamless PBR and More.md deleted file mode 100644 index 66f7ec5584b272ad56a643b788e52e75841b5fdb..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download High-Quality 3D Textures for Free - Seamless PBR and More.md +++ /dev/null @@ -1,170 +0,0 @@ - -

    Texture Free Download 3D: How to Find and Use High-Quality Assets for Your Projects

    -

    If you are a 3D artist, designer, or hobbyist, you know how important it is to have realistic and detailed textures for your models and scenes. Textures can make or break the visual appeal and immersion of your 3D creations. But where can you find high-quality textures for free? And how can you use them effectively in your projects? In this article, we will answer these questions and more. We will explain what 3D textures are, why they are important, how to find them online, and how to use them in your projects. By the end of this article, you will have a better understanding of how to use texture free download 3d assets to enhance your 3D work.

    -

    texture free download 3d


    Download Zip ===== https://urlca.com/2uO5uJ



    -

    What are 3D textures and why are they important?

    -

    The definition and types of 3D textures

    -

    A texture is a digital image that is mapped onto a surface of a 3D object to give it color, detail, and realism. Textures can be created from photographs, paintings, drawings, or computer-generated images. There are different types of textures that serve different purposes, such as:

    -
      -
    • Diffuse: This is the base color of the surface, which determines how it reflects light.
    • -
    • Normal: This is a map that simulates the bumps and dents of the surface, which affects how it reacts to light and shadows.
    • -
    • Specular: This is a map that controls the shininess and reflectivity of the surface, which affects how it looks when light hits it.
    • -
    • Roughness: This is a map that defines how rough or smooth the surface is, which affects how it scatters light.
    • -
    • Metallic: This is a map that indicates whether the surface is metallic or non-metallic, which affects how it reflects light.
    • -
    • Emissive: This is a map that makes the surface glow or emit light.
    • -
    • Displacement: This is a map that modifies the shape of the surface by adding or subtracting geometry.
    • -
    -

    By combining different types of textures, you can create realistic and complex materials for your 3D objects.
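
    To make this concrete, here is a minimal sketch of combining several of these maps into one material through Blender's Python API (bpy). It is an illustrative sketch only: the file paths are hypothetical placeholders, and node and socket names follow recent Blender releases, so they may differ in your version.

    # Minimal, illustrative Blender Python sketch: load three texture maps and
    # wire them into the Principled BSDF of a new material. File paths are
    # hypothetical placeholders for textures you have downloaded.
    import bpy

    mat = bpy.data.materials.new(name="FreeTextureMaterial")
    mat.use_nodes = True
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    bsdf = nodes["Principled BSDF"]  # created automatically by use_nodes

    def image_node(path, non_color=False):
        # Non-color data (roughness, normal) must skip gamma correction.
        tex = nodes.new("ShaderNodeTexImage")
        tex.image = bpy.data.images.load(path)
        if non_color:
            tex.image.colorspace_settings.name = "Non-Color"
        return tex

    diffuse = image_node("//textures/wood_diffuse.png")
    rough = image_node("//textures/wood_roughness.png", non_color=True)
    normal = image_node("//textures/wood_normal.png", non_color=True)

    links.new(diffuse.outputs["Color"], bsdf.inputs["Base Color"])
    links.new(rough.outputs["Color"], bsdf.inputs["Roughness"])

    # A normal map is decoded by a Normal Map node before reaching the BSDF.
    nmap = nodes.new("ShaderNodeNormalMap")
    links.new(normal.outputs["Color"], nmap.inputs["Color"])
    links.new(nmap.outputs["Normal"], bsdf.inputs["Normal"])

    The one detail worth remembering here is that color maps stay in the default color space, while data maps such as roughness and normal maps are marked Non-Color so they are not gamma-corrected.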

    -

    The benefits and challenges of using 3D textures

    -

    Using 3D textures can have many benefits for your projects, such as:

    -
      -
    • Enhancing the realism and detail of your models and scenes: Textures can add depth, variation, and richness to your surfaces, making them look more believable and appealing.
    • -
    • Saving time and resources: Textures can reduce the need for modeling every detail of your objects, which can save you time and effort. Textures can also optimize your performance by reducing the polygon count of your models.
    • -
    • Expressing your creativity and style: Textures can help you create unique and original materials for your objects, which can reflect your artistic vision and personality.
    • -
    -

    However, using 3D textures also comes with some challenges, such as:

    -
      -
    • Finding high-quality and suitable textures for your needs: Not all textures are created equal. Some may be low-resolution, poorly made, or incompatible with your software or hardware. Some may also have legal or ethical issues such as copyright infringement, plagiarism, or inappropriate content. Finding free textures that meet your quality and suitability standards can be challenging and time-consuming.

    • -
    • Applying and adjusting textures to your objects and scenes: Not all textures are ready to use out of the box. Some may require editing, resizing, or tweaking to fit your objects and scenes. Some may also need additional settings or adjustments to work well with your lighting, camera, or rendering engine. Applying and adjusting textures can be a complex and technical process that requires skill and experience.
    • -
    -

    Despite these challenges, using 3D textures can be rewarding and fun if you know how to find and use them properly. In the next sections, we will show you how to do that.

    -

    free 3d texture library
    -free pbr textures for 3d
    -free seamless textures 3d
    -free 3d texture sites
    -free 3d texture packs
    -free 3d texture generator
    -free 3d texture blender
    -free 3d texture maya
    -free 3d texture photoshop
    -free 3d texture unity
    -free 3d texture unreal engine
    -free 3d texture cinema 4d
    -free 3d texture sketchup
    -free 3d texture zbrush
    -free 3d texture substance painter
    -free 3d texture wood
    -free 3d texture metal
    -free 3d texture stone
    -free 3d texture brick
    -free 3d texture grass
    -free 3d texture dirt
    -free 3d texture sand
    -free 3d texture water
    -free 3d texture sky
    -free 3d texture clouds
    -free 3d texture fire
    -free 3d texture smoke
    -free 3d texture snow
    -free 3d texture ice
    -free 3d texture glass
    -free 3d texture marble
    -free 3d texture concrete
    -free 3d texture leather
    -free 3d texture fabric
    -free 3d texture paper
    -free 3d texture plastic
    -free 3d texture rubber
    -free 3d texture gold
    -free 3d texture silver
    -free 3d texture copper
    -free 3d texture rust
    -free 3d texture paint
    -free 3d texture graffiti
    -free 3d texture tiles
    -free 3d texture rock
    -free 3d texture asphalt
    -free 3d texture road
    -free 3d texture wall
    -free 3d texture floor

    -

    How to find free 3D textures online

    -

    The criteria and sources for choosing free 3D textures

    -

    When looking for free 3D textures online, you should consider the following criteria:

    -
      -
    • Resolution: This is the size of the texture image in pixels, which determines how sharp and detailed it looks. Higher resolution textures are better for close-up or large-scale objects, while lower resolution textures are better for distant or small-scale objects. You should choose a resolution that matches your object size and scene scale, as well as your hardware capabilities and performance goals.
    • -
    • Format: This is the file type of the texture image, which determines how it is stored and compressed. Different formats have different advantages and disadvantages in terms of quality, size, compatibility, and flexibility. Some common formats are JPG, PNG, TGA, BMP, TIFF, PSD, EXR, HDR, etc. You should choose a format that works well with your software and hardware, as well as your editing and rendering needs.
    • -
    • Type: This is the category of the texture image, which determines what kind of surface it represents. Different types of textures have different characteristics and uses, such as diffuse, normal, specular, roughness, metallic, emissive, displacement, etc. You should choose a type that suits your material and lighting requirements, as well as your artistic style and vision.
    • -
    • License: This is the legal agreement that governs how you can use the texture image. Different licenses have different terms and conditions regarding attribution, modification, distribution, commercial use, etc. Some common licenses are CC0 (public domain), CC BY (attribution), CC BY-SA (attribution-share alike), CC BY-ND (attribution-no derivatives), CC BY-NC (attribution-noncommercial), CC BY-NC-SA (attribution-noncommercial-share alike), CC BY-NC-ND (attribution-noncommercial-no derivatives), etc. You should choose a license that respects the original creator's rights and preferences, as well as your own rights and preferences.
    • -
    -

    There are many sources where you can find free 3D textures online, such as:

    -
      -
    • Search engines: You can use search engines like Google or Bing to find free 3D textures by using keywords like "texture free download 3d", "free 3d texture pack", "free 3d texture library", etc. You can also use filters like image size, color, type, license, etc. to narrow down your search results.
    • -
    • Social media: You can use social media platforms like Facebook or Twitter to find free 3D textures by following pages or accounts that share or promote free 3D resources. You can also use hashtags like #free3dtexture #free3dtextures #free3dtexturepack #free3dtexturelibrary etc. to discover new posts or tweets about free 3D textures.
    • -
    • Forums: You can use forums like Reddit or Quora to find free 3D textures by joining communities or groups that discuss or share free 3D resources. You can also ask questions or request recommendations about free 3D textures from other users.
    • -
    • Blogs: You can use blogs like Medium or WordPress to find free 3D textures by reading articles or posts that review or feature free 3D resources. You can also subscribe to newsletters or feeds that update you on new or popular free 3D textures.
    • -
    • YouTube: You can use YouTube to find free 3D textures by watching videos that showcase or demonstrate free 3D resources. You can also subscribe to channels or playlists that upload or curate free 3D textures.
    • -
    -

    However, not all sources are reliable or trustworthy. Some may provide low-quality or outdated textures, some may have broken or malicious links, some may have misleading or false information, some may have hidden or unfair terms and conditions, etc. Therefore, you should always be careful and cautious when downloading or using free 3D textures from online sources. You should always check the quality, format, type, and license of the textures before using them. You should also always credit the original creators and respect their rights and wishes.
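
    As a practical aid for the quality and format checks mentioned above, the following sketch uses Pillow (assumed to be installed; the folder name and minimum size are placeholder choices) to report each downloaded texture's resolution, file format, and color mode before you rely on it:

    # Sanity-check downloaded textures with Pillow: print each file's
    # resolution, format, and color mode, and flag low-resolution files.
    # The folder name and minimum size are placeholder choices.
    from pathlib import Path
    from PIL import Image

    MIN_SIDE = 1024  # smallest acceptable width/height, in pixels

    for path in sorted(Path("downloaded_textures").iterdir()):
        try:
            with Image.open(path) as img:
                w, h = img.size
                note = "" if min(w, h) >= MIN_SIDE else "  <- below minimum resolution"
                print(f"{path.name}: {w}x{h}, format={img.format}, mode={img.mode}{note}")
        except OSError:
            print(f"{path.name}: not a readable image, skipped")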

    -

    The best websites for free 3D textures

    -

    Among the many sources of free 3D textures online, some stand out as the best in terms of quality, variety, compatibility, and usability. Here are some of the best websites for free 3D textures that we recommend:

    -

    Poliigon

    -

    Poliigon is a website that offers high-quality and realistic 3D textures for various categories, such as wood, metal, fabric, brick, concrete, etc. The textures are created by professional artists and photographers, and are available in different resolutions and formats. The textures also come with multiple maps, such as diffuse, normal, specular, roughness, metallic, etc. Poliigon offers a free account that gives you access to hundreds of free textures, as well as a paid account that gives you access to thousands of premium textures. You can use Poliigon textures for both personal and commercial projects, as long as you follow their license agreement.

    -

    Poly Haven

    -

    Poly Haven is a website that offers free and open source 3D assets, including textures, models, and HDRIs. The textures are created by a team of volunteers and contributors, and are available in high resolution and multiple formats. The textures also come with multiple maps, such as diffuse, normal, specular, roughness, metallic, etc. Poly Haven offers a simple and easy-to-use interface that lets you browse, download, and use the textures without any hassle. You can use Poly Haven textures for any purpose, as long as you follow their CC0 license.

    -

    ambientCG

    -

    ambientCG is a website that offers free and high-quality 3D textures for various categories, such as nature, urban, sci-fi, fantasy, etc. The textures are created by a community of artists and enthusiasts, and are available in different resolutions and formats. The textures also come with multiple maps, such as diffuse, normal, specular, roughness, metallic, etc. ambientCG offers a user-friendly and intuitive interface that lets you search, filter, preview, and download the textures without any trouble. You can use ambientCG textures for any purpose, as long as you follow their CC0 license.

    -

    How to use free 3D textures in your projects

    -

    The steps and tips for applying free 3D textures

    -

    Once you have downloaded the free 3D textures that you want to use, you need to apply them to your objects and scenes. The exact steps and methods may vary depending on your software and workflow, but here are some general steps and tips that you can follow:

    -
      -
    1. Import the textures into your software: You need to import the texture files into your software, either by dragging and dropping them, or by using the import or open functions. You may need to convert or compress the texture files if they are not compatible with your software or hardware.
    2. -
    3. Create a material for your object: You need to create a material for your object, either by using the default or preset materials, or by creating a custom material. A material is a set of properties and settings that define how an object looks and behaves in terms of color, texture, lighting, etc.
    4. -
    5. Assign the textures to the material: You need to assign the textures to the material, either by using the automatic or manual mapping functions. You may need to adjust the scale, rotation, offset, or tiling of the textures to fit your object and scene (a small Blender Python sketch of this follows the list).
    6. -
    7. Adjust the material settings: You need to adjust the material settings, either by using the default or custom parameters. You may need to tweak the values or options of the texture maps, such as diffuse, normal, specular, roughness, metallic, etc. to achieve the desired effect and appearance.
    8. -
    9. Preview and render your object and scene: You need to preview and render your object and scene, either by using the viewport or the rendering engine. You may need to adjust the lighting, camera, or rendering settings to optimize your performance and quality.
    10. -
    -
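
    To illustrate the scale, rotation, offset, and tiling adjustment mentioned in the "Assign the textures to the material" step above, here is a small Blender Python (bpy) sketch that routes every image texture in a material through Texture Coordinate and Mapping nodes. The material name is a hypothetical placeholder, and node names follow recent Blender releases:

    # Illustrative sketch of adjusting texture tiling in Blender Python: route
    # every image texture in a material through Texture Coordinate and Mapping
    # nodes. The material name is a hypothetical placeholder.
    import bpy

    mat = bpy.data.materials["FreeTextureMaterial"]
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links

    coords = nodes.new("ShaderNodeTexCoord")
    mapping = nodes.new("ShaderNodeMapping")
    mapping.inputs["Scale"].default_value = (4.0, 4.0, 1.0)      # tile 4x4 in UV space
    mapping.inputs["Location"].default_value = (0.25, 0.0, 0.0)  # shift the pattern

    links.new(coords.outputs["UV"], mapping.inputs["Vector"])
    for node in nodes:
        if node.type == 'TEX_IMAGE':
            links.new(mapping.outputs["Vector"], node.inputs["Vector"])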

    Some tips that you can use to apply free 3D textures effectively are:

    -
      -
    • Use multiple texture maps: Using multiple texture maps can add more realism and detail to your materials, as they can simulate different aspects of the surface, such as color, bumpiness, shininess, roughness, etc. However, you should also be careful not to use too many texture maps, as they can increase your memory usage and rendering time.
    • -
    • Use seamless textures: Using seamless textures can avoid visible seams or edges on your surfaces, as they can tile or repeat without any interruption or distortion. However, you should also be careful not to use too repetitive or uniform textures, as they can make your surfaces look boring or artificial. You should try to use textures that have some variation or randomness, or mix different textures together to create more diversity and interest (a quick seam check is sketched after this list).
    • -
    • Use appropriate textures for your objects and scenes: Using appropriate textures can enhance the realism and consistency of your objects and scenes, as they can match the shape, size, style, and context of your objects and scenes. However, you should also be careful not to use textures that are too specific or irrelevant, as they can make your objects and scenes look out of place or unrealistic. You should try to use textures that have a similar or complementary theme, mood, or genre to your objects and scenes.
    • -
    -
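
    One quick way to run the seam check mentioned in the tips above: offset the image by half its width and height so the wrap-around edges land in the middle of the picture, where any seam is easy to spot. A minimal Pillow sketch, with placeholder file names:

    # Quick seam check for "seamless" textures with Pillow: ImageChops.offset
    # wraps the image around, so tiling seams land in the middle of the
    # preview where they are easy to spot. File names are placeholders.
    from PIL import Image, ImageChops

    img = Image.open("brick_diffuse.png")
    w, h = img.size
    preview = ImageChops.offset(img, w // 2, h // 2)  # wraps pixels around the edges
    preview.save("brick_diffuse_seam_check.png")      # look for lines along the center cross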

    The examples and tools for creating stunning 3D scenes with free textures

    -

    To give you some inspiration and guidance, here are some examples and tools that you can use to create stunning 3D scenes with free textures:

    - - - - - - - - - - - - - - - - - - - - - -
    Example | Tool | Description
    Forest scene by Andrew Alexander | Blender | Blender is a free and open source 3D creation suite that supports modeling, sculpting, texturing, animation, rendering, and more. You can use Blender to create realistic and beautiful 3D scenes with free textures from various sources. For example, this forest scene by Andrew Alexander uses free textures from Poliigon, Poly Haven, ambientCG, and other websites.
    City scene by Mohamed Chahin | Unity | Unity is a free and popular game engine that supports 2D and 3D development, as well as VR and AR applications. You can use Unity to create immersive and interactive 3D scenes with free textures from various sources. For example, this city scene by Mohamed Chahin uses free textures from Poliigon, Poly Haven, ambientCG, and other websites.
    Space scene by Mohamed Chahin | Unreal Engine | Unreal Engine is a free and powerful game engine that supports 2D and 3D development, as well as VR and AR applications. You can use Unreal Engine to create stunning and realistic 3D scenes with free textures from various sources. For example, this space scene by Mohamed Chahin uses free textures from Poliigon, Poly Haven, ambientCG, and other websites.
    -

    Conclusion

    -

    In conclusion, free 3D texture downloads are a great way to enhance your 3D projects with realistic and detailed surfaces. However, you need to know how to find and use them properly to achieve the best results. In this article, we have explained what 3D textures are, why they are important, how to find them online, and how to use them in your projects. We hope this article has been helpful and informative. If you have any questions or comments about free 3D texture downloads, feel free to leave them below.

    -

    FAQs

    -

    Here are some frequently asked questions about free 3D texture downloads:

    -
      -
    • Q: What is the difference between a texture and a material?
    • A: A texture is a digital image that is mapped onto the surface of a 3D object to give it color, detail, and realism. A material is a set of properties and settings that define how an object looks and behaves in terms of color, texture, lighting, etc.
    • Q: How do I edit or create my own 3D textures?
    • A: You can edit or create your own 3D textures using various software tools, such as Photoshop, GIMP, Substance Painter, Quixel Mixer, etc. You can also use online tools, such as Texture Maker, Normal Map Online, etc. You can use these tools to modify, combine, or generate textures from scratch.
    • Q: How do I optimize my 3D textures for performance and quality?
    • A: You can optimize your 3D textures for performance and quality by using various techniques, such as:
      • Choosing the right resolution and format: You should choose a resolution and format that matches your object size and scene scale, as well as your hardware capabilities and performance goals. Higher resolution textures may look better, but they also consume more memory and rendering time.
      • Using texture compression: You can use texture compression to reduce the size and bandwidth of your textures without losing too much quality. Texture compression can be done with different algorithms, such as DXT, BC, ETC, ASTC, etc. You should choose a compression format that works well with your software and hardware, as well as your texture type and quality.
      • Using texture atlases: You can use texture atlases to combine multiple textures into one large texture, which reduces the number of draw calls and texture switches. Texture atlases can be created with different methods, such as packing, tiling, blending, etc. You should choose a method that preserves the quality and functionality of your textures (a small packing sketch follows this answer).
      • Using texture streaming: You can use texture streaming to load and unload textures dynamically based on the distance and visibility of your objects and scenes. Texture streaming improves performance and memory usage by only loading the textures that are needed at any given time, using techniques such as mipmapping, LODs, virtual texturing, etc. You should choose a technique that works well with your software and hardware, as well as your texture resolution and quality.
      -
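    As referenced in the texture-atlas point above, here is a minimal Python sketch of the packing idea using Pillow. The filenames, tile size, and grid layout are placeholder assumptions; production pipelines use dedicated atlas packers that also handle padding and varying tile sizes.

```python
# Sketch of a simple texture atlas: pack equally sized tiles into one sheet
# so the renderer binds a single texture, and record each tile's UV rectangle
# for remapping mesh UVs. Filenames are placeholders.
from PIL import Image

def pack_atlas(paths, tile_size=512, cols=2):
    rows = (len(paths) + cols - 1) // cols
    atlas = Image.new("RGBA", (cols * tile_size, rows * tile_size))
    uv_rects = {}
    for i, path in enumerate(paths):
        tile = Image.open(path).convert("RGBA").resize((tile_size, tile_size))
        x, y = (i % cols) * tile_size, (i // cols) * tile_size
        atlas.paste(tile, (x, y))
        # Normalized UV rectangle (u0, v0, u1, v1) of this tile in the atlas
        uv_rects[path] = (x / atlas.width, y / atlas.height,
                          (x + tile_size) / atlas.width,
                          (y + tile_size) / atlas.height)
    return atlas, uv_rects

atlas, uvs = pack_atlas(["brick.png", "wood.png", "metal.png", "grass.png"])
atlas.save("atlas.png")
```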
    • Q: Where can I learn more about 3D textures?
    • A: You can learn more about 3D textures by using various resources, such as:
      • Books: You can read books that teach you the theory and practice of 3D texturing, such as The Essential Guide to 3D in Flash by Rob Bateman and Richard Olsson, Digital Texturing and Painting by Owen Demers, The Complete Guide to Blender Graphics by John M. Blain, etc.
      • Courses: You can take courses that teach you the skills and techniques of 3D texturing, such as Introduction to Texturing for Games by Pluralsight and Learn 3D Modelling - The Complete Blender Creator Course by Udemy.
      • Contests: You can enter texturing contests and challenges, such as the CGTrader Awards, and get feedback or recognition from judges or audiences. You can also view other contestants' entries and get inspired or learn from their work.

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Download and Install Free Fire MAX 32 Bit APK from APKPure.md b/spaces/congsaPfin/Manga-OCR/logs/How to Download and Install Free Fire MAX 32 Bit APK from APKPure.md deleted file mode 100644 index 180cf8cb3ae5291465fafbccefb4a8862774c3e3..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to Download and Install Free Fire MAX 32 Bit APK from APKPure.md +++ /dev/null @@ -1,104 +0,0 @@ - -

      Free Fire MAX: How to Download and Play the Ultimate Battle Royale Game on Your Android Device

      -

      If you are a fan of battle royale games, you might have heard of Free Fire, one of the most popular and downloaded games in the genre. But did you know that there is a more enhanced and immersive version of the game called Free Fire MAX? In this article, we will tell you everything you need to know about Free Fire MAX, how to download it from APKPure, and how to play it on your Android device.

      -

      What is Free Fire MAX?

      -

      Free Fire MAX is a premium version of Free Fire that is designed exclusively to deliver a superior gameplay experience in a battle royale. It features ultra HD resolutions, breathtaking effects, realistic animations, and improved sound quality. It also offers a variety of exciting game modes, such as classic, ranked, clash squad, and special modes. You can enjoy all these features with all Free Fire players via exclusive Firelink technology, which allows you to use your existing Free Fire account to play both games.

      -

      -

      Features of Free Fire MAX

      -

      Some of the amazing features of Free Fire MAX are:

      -
        -
      • Enhanced graphics and sound: You can experience the game in stunning detail, with realistic lighting, shadows, textures, and reflections. You can also hear every gunshot, explosion, and footstep with crystal clear sound quality.
      • Customizable controls and settings: You can adjust the sensitivity, layout, and size of the controls according to your preference. You can also tweak the graphics, sound, and performance settings to optimize the game for your device.
      • CraftLand mode: This is a unique mode that allows you to create your own maps and share them with other players. You can use various tools and objects to design your own battlegrounds and test them out with your friends.
      • Fast-paced gameplay: You can enjoy the thrill of a battle royale in 10-minute matches, where you have to survive against 49 other players on an island. You can choose your landing spot, loot weapons and items, drive vehicles, and use various skills and strategies to be the last one standing.
      • Diverse characters and pets: You can choose from over 40 characters and over 20 pets, each with their own unique abilities and personalities. You can customize their appearance with various outfits, accessories, and skins.
      -

      Requirements for Free Fire MAX

      -

      To play Free Fire MAX on your Android device, you need to meet the following requirements:

      -


      -
        -
      • Your device must have Android version 4.4 or higher.
      • Your device must have at least 2 GB of RAM.
      • Your device must have at least 4 GB of free storage space.
      • You must have a stable internet connection.
      -

      How to Download Free Fire MAX APK from APKPure

      -

      If you want to download Free Fire MAX on your Android device, one of the easiest ways is to use APKPure, a trusted website that offers free and safe APK downloads for various apps and games. Here are the steps to download Free Fire MAX APK from APKPure:

      -

      Step 1: Visit APKPure website

      -

      Open your browser and go to https://apkcombo.com/search/free-fire-max-32-bit, which is the official website of APKPure.

      Step 2: Search for Free Fire MAX

      -

      On the APKPure website, you will see a search bar at the top. Type "Free Fire MAX" and hit enter. You will see a list of results related to Free Fire MAX. Look for the one that has the official logo and the latest version number. Click on it to go to the download page.

      -

      Step 3: Download the XAPK file

      -

      On the download page, you will see a green button that says "Download XAPK". XAPK is a file format that contains both the APK and the OBB data of the game. Click on the button to start downloading the XAPK file. The file size is about 1.2 GB, so make sure you have enough space and a good internet connection.
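      Since an XAPK is, in practice, a ZIP archive bundling the APK together with the OBB expansion data, its contents can be inspected with a few lines of standard-library Python; the filename below is a placeholder.

```python
# Unpack an XAPK (a ZIP bundle of APK + OBB data) and list what it contains.
import zipfile
from pathlib import Path

with zipfile.ZipFile("freefire_max.xapk") as xapk:  # placeholder filename
    xapk.extractall("freefire_max_unpacked")

root = Path("freefire_max_unpacked")
apks = [p.name for p in root.glob("*.apk")]     # the installable package(s)
obbs = [p.name for p in root.rglob("*.obb")]    # the game's expansion data
print(f"APK files: {apks}")
print(f"OBB files: {obbs}")
```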

      -

      Step 4: Install the XAPK file

      -

      After downloading the XAPK file, you need to install it on your device. To do this, you need to use an app called APKPure App, which is also available on the APKPure website. Download and install the APKPure App on your device, then open it. You will see a list of downloaded files on your device. Tap on the Free Fire MAX XAPK file and follow the instructions to install it. You may need to enable unknown sources in your device settings to allow the installation.

      -

      How to Play Free Fire MAX on Your Android Device

      -

      Now that you have installed Free Fire MAX on your device, you are ready to play it. Here are the steps to play Free Fire MAX on your Android device:

      -

      Step 1: Launch the game

      -

      Tap on the Free Fire MAX icon on your home screen or app drawer to launch the game. You will see a loading screen with the game logo and some tips. Wait for the game to load completely.

      -

      Step 2: Log in with your Free Fire account

      -

      After loading, you will see a login screen where you can choose to log in with your Facebook, Google, VK, or Huawei account. You can also use a guest account if you don't have any of these accounts. However, we recommend using a linked account to save your progress and access more features. If you already have a Free Fire account, you can use it to log in to Free Fire MAX as well. You will see your profile and stats synced across both games.

      -

      Step 3: Choose your game mode and settings

      -

      Once you are logged in, you will see the main menu where you can choose your game mode, settings, character, pet, and more. You can swipe left or right to browse through different options. You can also tap on the icons at the bottom to access other features, such as friends, events, store, and inventory. To start a match, tap on the "Start" button at the bottom right corner. You can choose between solo, duo, or squad mode, and between classic or ranked mode. You can also join or create a custom room with your own rules and invite other players.

      -

      Step 4: Enjoy the premium gameplay experience

      -

      After choosing your mode and settings, you will enter a lobby where you can wait for other players to join or invite your friends. You can also chat with other players, change your character or pet, or check out other features while waiting. When the match starts, you will be on a plane with other players. You can choose where to jump off by tapping on the map or following your teammates' markers. Once you land, you need to find weapons, items, and vehicles to survive and fight against other players. The safe zone will shrink over time, forcing you to move closer to other players. The last player or team alive wins the match.

      -

      Conclusion

      -

      Free Fire MAX is an awesome game that offers a premium gameplay experience in a battle royale genre. It has amazing graphics, sound, controls, and features that make it stand out from other games. You can download it from APKPure easily and play it on your Android device with your existing Free Fire account. If you are looking for a new and exciting way to enjoy a battle royale game, you should definitely try out Free Fire MAX.

      -

      FAQs

      -

      Here are some frequently asked questions about Free Fire MAX:

      -
        -
      • Q: Is Free Fire MAX free?
      • A: Yes, Free Fire MAX is free to download and play. However, it may contain some in-app purchases that require real money.
      • Q: Is Free Fire MAX compatible with Free Fire?
      • A: Yes, Free Fire MAX is compatible with Free Fire. You can use your existing Free Fire account to play both games and enjoy the same features and events. You can also play with other players who are using either game.
      • Q: How can I update Free Fire MAX?
      • A: You can update Free Fire MAX by using the APKPure App, which will notify you when there is a new version available. You can also check the APKPure website for the latest updates and download them manually.
      • Q: What are the differences between Free Fire and Free Fire MAX?
      • A: The main differences between Free Fire and Free Fire MAX are the graphics, sound, and performance. Free Fire MAX has higher quality graphics and sound, but it also requires more storage space and RAM. Free Fire has lower quality graphics and sound, but it also runs smoother on low-end devices.
      • Q: Can I play Free Fire MAX on PC?
      • A: Yes, you can play Free Fire MAX on PC by using an Android emulator, such as BlueStacks or NoxPlayer. You can download the emulator from their official websites and install it on your PC. Then, you can download the Free Fire MAX APK from APKPure and install it on the emulator. You can also use your keyboard and mouse to control the game.

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Lost in Blue Mod Experience the Island Life with Amazing Graphics and Gameplay.md b/spaces/congsaPfin/Manga-OCR/logs/Lost in Blue Mod Experience the Island Life with Amazing Graphics and Gameplay.md deleted file mode 100644 index 24f6fa6d9b392de058ec7009852434ae29ae4118..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Lost in Blue Mod Experience the Island Life with Amazing Graphics and Gameplay.md +++ /dev/null @@ -1,126 +0,0 @@ - -

      Lost in Blue Mod: A Survival Adventure Game with Unlimited Money and Menu Mod

      -

      If you are looking for a thrilling and challenging survival game, then you might want to try Lost in Blue Mod. This is a modded version of the original game Lost in Blue, which is a post-apocalyptic adventure game where you have to survive on an island full of dangers and mysteries. In this article, we will tell you everything you need to know about Lost in Blue Mod, including its features, how to download and install it, how to play it, and its pros and cons.

      -

      What is Lost in Blue Mod?

      -

      Lost in Blue Mod is a modified version of the game Lost in Blue, which is developed by Volcano Force. In this game, you play as a survivor who wakes up on an island after a plane crash. You have to explore the island, gather resources, craft tools and weapons, build shelters, hunt animals, fight enemies, and find other survivors. You also have to deal with hunger, thirst, fatigue, weather, and diseases. The game has realistic graphics, immersive sound effects, and dynamic day-night cycles. The game also has a story mode where you can follow the main plot and uncover the secrets of the island.

      -

      -

      Lost in Blue Mod adds some features to the original game that make it more fun and easy to play. These features include unlimited money, menu mod, and no ads. We will explain these features in more detail below.

      -

      Features of Lost in Blue Mod

      -

      Unlimited Money

      -

      One of the features of Lost in Blue Mod is unlimited money. This means that you can buy anything you want from the shop without worrying about the cost. You can buy food, water, medicine, clothes, weapons, tools, and more. This will help you survive longer and easier on the island. You can also upgrade your skills and abilities with unlimited money.

      -

      Menu Mod

      -

      Another feature of Lost in Blue Mod is menu mod. This means that you can access a special menu that gives you some options to customize your game experience. For example, you can enable or disable god mode, which makes you invincible to any damage. You can also enable or disable one-hit kill, which lets you kill any enemy with one hit. You can also enable or disable free crafting, which lets you craft anything without needing any materials. You can also enable or disable no hunger, no thirst, no fatigue, no disease, and more.

      -

      No Ads

      -

      The last feature of Lost in Blue Mod is no ads. This means that you can enjoy the game without any interruptions or distractions from annoying ads. You can play the game smoothly and peacefully without having to watch any videos or banners.

      -

      How to Download and Install Lost in Blue Mod APK?

      -

      If you want to download and install Lost in Blue Mod APK on your Android device, then you need to follow these simple steps:

      -

      -

      Steps to Download Lost in Blue Mod APK

      -
        -
      1. Go to the link, which is a trusted source for downloading modded APK files.
      2. Click on the download button and wait for the file to be downloaded on your device.
      3. Once the download is complete, locate the file on your device's file manager or downloads folder.
      -

      Steps to Install Lost in Blue Mod APK

      -
        -
      1. Before installing the APK file, make sure that you have enabled the installation of apps from unknown sources in your device's settings. If you don't know how to do this, you can follow these steps:
        • Go to your device's settings and look for the security or privacy option.
        • Find the option that says "Unknown sources" or "Allow installation of apps from unknown sources" and toggle it on.
        • Confirm your choice by tapping OK or Yes.
      2. Now, go back to the APK file and tap on it to start the installation process.
      3. Follow the instructions on the screen and wait for the installation to finish.
      4. Once the installation is done, you can launch the game from your app drawer or home screen.
      -

      How to Play Lost in Blue Mod?

      -

      Playing Lost in Blue Mod is similar to playing the original game, but with some added advantages. Here are some tips and tricks for playing Lost in Blue Mod:

      -

      Tips and Tricks for Playing Lost in Blue Mod

      -
        -
      • Use the menu mod to customize your game experience according to your preference. You can enable or disable any option you want, such as god mode, one-hit kill, free crafting, etc. However, be careful not to abuse these options too much, as they might make the game too easy or boring.
      • Use the unlimited money feature to buy anything you need from the shop. You can buy food, water, medicine, clothes, weapons, tools, and more. You can also upgrade your skills and abilities with unlimited money.
      • Explore the island and discover its secrets. You can find hidden items, clues, puzzles, and mysteries on the island. You can also interact with other survivors and learn more about their stories and backgrounds.
      • Craft tools and weapons to help you survive. You can craft various items using the materials you gather from the island, such as knives, axes, bows, arrows, spears, traps, fishing rods, etc. You can also craft shelters, fireplaces, beds, tables, etc.
      • Hunt animals and gather plants for food. You can hunt different animals on the island, such as deer, rabbits, boars, bears, etc. You can also gather various plants and fruits for food. However, be careful not to eat anything poisonous or rotten.
      • Fight enemies and defend yourself. You will encounter various enemies on the island, such as zombies, mutants, bandits, etc. You will have to fight them using your weapons and skills. You can also use stealth and traps to avoid or ambush them.
      • Follow the story mode and complete the missions. You can follow the main plot of the game and complete the missions given by other survivors or by yourself. You will have to face various challenges and dangers along the way, and make choices that will affect the outcome of the game.
      -

      Pros and Cons of Lost in Blue Mod

      -

      Lost in Blue Mod is a fun and exciting survival game that offers a lot of features and advantages over the original game. However, it also has some drawbacks that you should be aware of. Here are some pros and cons of Lost in Blue Mod:

      -

      Pros of Lost in Blue Mod

      -
        -
      • It has unlimited money that lets you buy anything you want from the shop.
      • It has a menu mod that lets you customize your game experience according to your preference.
      • It has no ads that interrupt or distract you from playing the game.
      • It has realistic graphics, immersive sound effects, and dynamic day-night cycles that create a realistic and immersive survival experience.
      • It has a story mode that lets you follow the main plot and uncover the secrets of the island.
      -

      Cons of Lost in Blue Mod

      -
        -
      • It might be too easy or boring if you abuse the menu mod options too much.
      • It might not be compatible with some devices or Android versions.
      • It might have some bugs or glitches that affect the gameplay or performance of the game.
      • It might not be updated regularly or frequently by the mod developers.
      -

      Conclusion

      -

      In conclusion, Lost in Blue Mod is a survival adventure game that lets you survive on an island full of dangers and mysteries. It has unlimited money, menu mod, and no ads that make it more fun and easy to play. It also has realistic graphics, immersive sound effects, dynamic day-night cycles, and a story mode that create a realistic and immersive survival experience. However, it also has some drawbacks that you should be aware of, such as compatibility issues, bugs or glitches, lack of updates, or boredom. If you are interested in playing Lost in Blue Mod, you can download and install it from the link and follow the steps we provided. We hope you enjoy playing Lost in Blue Mod and have a great survival adventure.

      -

      FAQs

      -

      Here are some frequently asked questions about Lost in Blue Mod:

      -
        -
      1. What is the difference between Lost in Blue and Lost in Blue Mod?

        Lost in Blue is the original game developed by Volcano Force, while Lost in Blue Mod is a modified version of the game that adds some features such as unlimited money, menu mod, and no ads.

      2. Is Lost in Blue Mod safe to download and install?

        Yes, Lost in Blue Mod is safe to download and install, as long as you use the link that we provided, which is a trusted source for modded APK files. However, you should always be careful when downloading and installing any app from unknown sources, as they might contain viruses or malware that can harm your device.

      3. Can I play Lost in Blue Mod offline?

        Yes, you can play Lost in Blue Mod offline, as it does not require an internet connection to run. However, you might need an internet connection to download and install the game, or to access some online features such as leaderboards or achievements.

      4. Can I play Lost in Blue Mod with my friends?

        No, Lost in Blue Mod does not support multiplayer mode, as it is a single-player game. You can only play with other survivors that you meet on the island, who are controlled by the game's AI.

      5. How can I contact the developers of Lost in Blue Mod?

        You can contact the developers of Lost in Blue Mod by visiting their website, where you can find their email address, social media accounts, and other information. You can also leave a comment or feedback on their website or on the download page.

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/WhatsApp Business for Mac The Ultimate Solution for Your Business Communication Needs.md b/spaces/congsaPfin/Manga-OCR/logs/WhatsApp Business for Mac The Ultimate Solution for Your Business Communication Needs.md deleted file mode 100644 index 60ba07b11c8b6eb91ff95dc941b2c9a60112cd42..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/WhatsApp Business for Mac The Ultimate Solution for Your Business Communication Needs.md +++ /dev/null @@ -1,209 +0,0 @@ - -

      How to Download WhatsApp for Business Mac

      -

      WhatsApp is one of the most popular messaging apps in the world, with over 2 billion users. But did you know that there is also a version of WhatsApp designed specifically for businesses? WhatsApp for business is a free-to-download app that allows you to communicate with your customers, clients, partners, and suppliers in a fast, simple, and secure way. Whether you run a small business or a large corporation, WhatsApp for business can help you transform your customer experience, drive sales, and improve customer support.

      -

      If you are a Mac user, you might be wondering how to download WhatsApp for business on your device. In this article, we will show you how to download and install WhatsApp for business app on Mac, how to set up and use the app for your business needs, and some tips and tricks to make the most of it. Let's get started!

      -

      -

      What is WhatsApp for Business?

      -

      WhatsApp for business is a separate app from WhatsApp Messenger that allows you to create a business profile, showcase your products and services, send automated messages, manage your chats and contacts, and access various tools and settings. Some of the features and benefits of WhatsApp for business are:

      -
        -
      • You can create a business profile with your logo, description, address, website, email, hours of operation, catalog, etc.
      • You can showcase your products and services in a catalog that customers can browse and order from within the app.
      • You can send automated messages such as greetings, away messages, quick replies, etc. to save time and provide better customer service.
      • You can manage your chats and contacts by creating labels, sorting chats by unread or starred messages, archiving chats, blocking contacts, etc.
      • You can access various tools and settings such as notifications, storage, data usage, statistics, etc. to optimize your app performance.
      • You can use WhatsApp Web or Desktop to access your chats from any browser or computer.
      -

      WhatsApp for business is compatible with Mac OS X 10.11 or later. You can download it from the Mac App Store or from the official website.
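      As a quick illustration of that requirement, the following standard-library Python sketch checks whether the Mac it runs on meets the OS X 10.11 minimum before you download:

```python
# Check the running macOS version against the OS X 10.11 minimum.
import platform

MIN_VERSION = (10, 11)

ver_str = platform.mac_ver()[0]  # e.g. "12.6"; empty string off-macOS
if not ver_str:
    print("Not running on macOS.")
else:
    version = tuple(int(p) for p in ver_str.split(".")[:2])
    ok = version >= MIN_VERSION
    print(f"macOS {ver_str}: {'compatible' if ok else 'too old'} "
          f"(requires {MIN_VERSION[0]}.{MIN_VERSION[1]} or later)")
```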

      -

      How to Download and Install WhatsApp for Business App on Mac

      -

      To download and install WhatsApp for business app on Mac, you have two options: download it from the Mac App Store or download it from the official website. Here are the steps for each option:

      -

      Download from the Mac App Store

      -

      To download WhatsApp for business app from the Mac App Store, follow these steps:

      -
        -
      1. Open the Mac App Store on your Mac.
      2. Search for WhatsApp for Business in the search bar.
      3. Click on the Get button to download the app.
      4. Once the app is downloaded, click on the Open button to launch it.
      -

      Download from the Official Website

      -

      To download WhatsApp for business app from the official website, follow these steps:

      -
        -
      1. Open your web browser and go to https://www.whatsapp.com/business/.
      2. Click on the Download button and choose Mac OS X 10.11 or later.
      3. The app will start downloading as a .zip file. Once the download is complete, open the file and drag the WhatsApp icon to your Applications folder.
      4. Double-click on the WhatsApp icon in your Applications folder to launch the app.
      -

      How to Set Up and Use WhatsApp for Business App on Mac

      -

      After you have downloaded and installed WhatsApp for business app on Mac, you need to set up and use the app for your business needs. Here are the steps to do that:

      -

      Verify Your Business Phone Number

      -

      To use WhatsApp for business app on Mac, you need to verify your business phone number and link it to your WhatsApp account. Here are the steps to do that:

      -
        -
      1. Open the WhatsApp for business app on your Mac.
      2. Select your country code and enter your business phone number. Make sure it is a valid number that can receive SMS or calls.
      3. Click on Next. You will receive a verification code via SMS or a phone call.
      4. Enter the verification code in the app and click on Verify.
      5. If you have an existing WhatsApp account with the same phone number, you will be asked to confirm if you want to use that account for WhatsApp for business. Click on Continue.
      6. If you don't have an existing WhatsApp account with the same phone number, you will be asked to create a new account. Enter your name and click on Create Account.
      -

      Restore Your Account from a Backup

      -

      If you have previously used WhatsApp for business app on another device, you can restore your chat history and media from a backup. Here are the steps to do that:

      -


      -
        -
      1. After verifying your phone number, you will see a screen asking if you want to restore your chats and media from iCloud. Click on Restore Chat History.
      2. The app will start restoring your chats and media from iCloud. This may take some time depending on the size of your backup and your internet connection speed.
      3. Once the restoration is complete, click on Next. You will see your chats and media in the app.
      -

      Set Your Business Name and Profile

      -

      To create a professional image for your business, you need to set your business name and profile in the app. Here are the steps to do that:

      -
        -
      1. In the app, click on Edit Profile.
      2. You will see a screen where you can enter your business name, logo, description, address, website, email, hours of operation, catalog, etc.
      3. Fill in as much information as possible and make sure it is accurate and relevant.
      4. You can also add or edit your catalog by clicking on Catalog. You can add products or services, prices, descriptions, images, links, etc.
      5. Once you are done, click on Save Changes. Your business profile will be visible to your customers in the app.
      -

      Manage Your Chats and Contacts

      -

      To communicate with your customers, clients, partners, and suppliers, you need to manage your chats and contacts in the app. Here are some of the things you can do:

      -
        -
      • You can send and receive messages, including text, voice, images, videos, documents, etc. You can also send stickers, emojis, GIFs, etc.
      • You can create labels to organize your chats and contacts by categories, such as new customer, order placed, payment pending, etc. You can also filter your chats by labels.
      • You can use quick replies to send predefined messages to answer common questions or requests. You can also create your own quick replies and edit them as needed.
      • You can send automated messages such as greetings, away messages, etc. to let your customers know when you are available or unavailable. You can also schedule these messages for specific times or days.
      • You can archive chats to hide them from your chat list. You can also unarchive them whenever you want.
      • You can block contacts to prevent them from messaging you or seeing your profile. You can also unblock them whenever you want.
      -

      Access Your Settings and Tools

      -

      To optimize your app performance and user experience, you need to access your settings and tools in the app. Here are some of the things you can do:

      -
        -
      • You can access your notifications to customize how you receive alerts for new messages, calls, etc. You can also mute or unmute chats or contacts.
      • You can access your storage to manage how much space the app uses on your Mac. You can also delete or clear chats, media, documents, etc.
      • You can access your data usage to monitor how much data the app consumes on your Mac. You can also reduce data usage by disabling media auto-download or lowering call quality.
      • You can access your statistics to view how many messages, calls, etc. you have sent and received in the app. You can also reset the statistics whenever you want.
      • You can access your help to get support, report a problem, contact us, or learn more about the app.
      -

      Tips and Tricks for Using WhatsApp for Business App on Mac

      -

      To make the most of WhatsApp for business app on Mac, here are some tips and tricks you can use:

      -

      Use Keyboard Shortcuts

      -

      To perform common actions faster, you can use keyboard shortcuts in the app. Here are some of the keyboard shortcuts you can use:

      • New chat: Command + N
      • New group: Command + Shift + N
      • New broadcast list: Command + Shift + B
      • New catalog item: Command + Shift + C
      • Edit profile: Command + E
      • Mute chat: Command + Shift + M
      • Delete chat: Delete or Command + Delete
      • Archive chat: Command + Shift + A
      • Paste without formatting: Command + Shift + V
      • Select all text in message box: Command + A
      • Cut selected text in message box: Command + X
      • Copy selected text in message box: Command + C
      • Paste text in message box: Command + V
      • Bold selected text in message box: Command + B
      • Italicize selected text in message box: Command + I
      • Add strikethrough to selected text in message box: Command + Shift + X
      • Add monospace to selected text in message box: Command + Shift + M
      • Send message: Return or Command + Return
      • Search chat: Command + F
      • Search chat list: Command + Shift + F
      • Zoom in: Command + Plus (+)
      • Zoom out: Command + Minus (-)
      • Reset zoom: Command + 0
      • Minimize window: Command + M
      • Close window: Command + W
      • Show or hide app window: Command + H
      • Show or hide app menu bar icon: Command + Shift + H
      • Show or hide app dock icon: Command + Shift + D
      • Show or hide app preferences: Command + ,
      • Show or hide app help: Command + ?
      • Check for app updates: Command + U
      • Quit app: Command + Q
      -

      Use Dark Mode

      -

      To reduce eye strain and save battery life, you can use dark mode in the app. Dark mode changes the app's background color to black and the text color to white. Here are the steps to enable dark mode:

      -
        -
      1. In the app, click on Edit Profile.
      2. Click on Theme.
      3. Select Dark.
      4. Click on Save Changes.
      -

      Use WhatsApp Web or Desktop

      -

      To access your chats from any browser or computer, you can use WhatsApp Web or Desktop. WhatsApp Web or Desktop allows you to sync your chats and media between your Mac and your phone. Here are the steps to use WhatsApp Web or Desktop:

      -
        -
      1. On your phone, open the WhatsApp for business app and tap on Settings.
      2. Tap on WhatsApp Web/Desktop.
      3. On your Mac, open your web browser and go to https://web.whatsapp.com/ or open the WhatsApp for business app.
      4. On your phone, scan the QR code displayed on your Mac.
      5. You will see your chats and media on your Mac. You can send and receive messages, make and receive calls, etc.
      -

      Conclusion

      -

      In this article, we have shown you how to download WhatsApp for business app on Mac, how to set up and use the app for your business needs, and some tips and tricks to make the most of it. WhatsApp for business is a powerful tool that can help you communicate with your customers, clients, partners, and suppliers in a fast, simple, and secure way. Whether you run a small business or a large corporation, WhatsApp for business can help you transform your customer experience, drive sales, and improve customer support.

      -

      If you have any questions or feedback about WhatsApp for business app on Mac, feel free to contact us or leave a comment below. We would love to hear from you!

      -

      Frequently Asked Questions

      -

      Here are some of the frequently asked questions about WhatsApp for business app on Mac:

      -

      Is WhatsApp for business free?

      -

      Yes, WhatsApp for business is free to download and use. However, you may incur data charges from your internet service provider or mobile carrier.

      -

      Can I use WhatsApp for business and WhatsApp Messenger on the same phone?

      -

      No, you can only use one WhatsApp account per phone number. If you want to use both apps on the same phone, you need to have two different phone numbers.

      -

      Can I use WhatsApp for business on multiple devices?

      -

      You can use WhatsApp for business on one phone and one Mac at the same time. However, you cannot use it on more than one phone or more than one Mac at the same time.

      -

      How can I backup my chats and media in WhatsApp for business?

      -

      You can backup your chats and media in WhatsApp for business using iCloud. To do that, follow these steps:

      -
        -
      1. On your phone, open the WhatsApp for business app and tap on Settings.
      2. Tap on Chats.
      3. Tap on Chat Backup.
      4. Tap on Back Up Now.
      5. You can also enable Auto Backup to back up your chats and media daily, weekly, or monthly.
      -

      How can I delete my account in WhatsApp for business?

      -

      If you want to delete your account in WhatsApp for business, follow these steps:

      -
        -
      1. On your phone, open the WhatsApp for business app and tap on Settings.
      2. Tap on Account.
      3. Tap on Delete My Account.
      4. Enter your phone number and tap on Delete My Account.
      5. This will delete your account, profile, chats, media, etc. from WhatsApp for business.

      \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/sync_bn.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/sync_bn.py deleted file mode 100644 index c9b016fcbe860989c56cd1040034bcfa60e146d2..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/sync_bn.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.distributed as dist -import torch.nn.functional as F -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.module import Module -from torch.nn.parameter import Parameter - -from annotator.uniformer.mmcv.cnn import NORM_LAYERS -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', [ - 'sync_bn_forward_mean', 'sync_bn_forward_var', 'sync_bn_forward_output', - 'sync_bn_backward_param', 'sync_bn_backward_data' -]) - - -class SyncBatchNormFunction(Function): - - @staticmethod - def symbolic(g, input, running_mean, running_var, weight, bias, momentum, - eps, group, group_size, stats_mode): - return g.op( - 'mmcv::MMCVSyncBatchNorm', - input, - running_mean, - running_var, - weight, - bias, - momentum_f=momentum, - eps_f=eps, - group_i=group, - group_size_i=group_size, - stats_mode=stats_mode) - - @staticmethod - def forward(self, input, running_mean, running_var, weight, bias, momentum, - eps, group, group_size, stats_mode): - self.momentum = momentum - self.eps = eps - self.group = group - self.group_size = group_size - self.stats_mode = stats_mode - - assert isinstance( - input, (torch.HalfTensor, torch.FloatTensor, - torch.cuda.HalfTensor, torch.cuda.FloatTensor)), \ - f'only support Half or Float Tensor, but {input.type()}' - output = torch.zeros_like(input) - input3d = input.flatten(start_dim=2) - output3d = output.view_as(input3d) - num_channels = input3d.size(1) - - # ensure mean/var/norm/std are initialized as zeros - # ``torch.empty()`` does not guarantee that - mean = torch.zeros( - num_channels, dtype=torch.float, device=input3d.device) - var = torch.zeros( - num_channels, dtype=torch.float, device=input3d.device) - norm = torch.zeros_like( - input3d, dtype=torch.float, device=input3d.device) - std = torch.zeros( - num_channels, dtype=torch.float, device=input3d.device) - - batch_size = input3d.size(0) - if batch_size > 0: - ext_module.sync_bn_forward_mean(input3d, mean) - batch_flag = torch.ones([1], device=mean.device, dtype=mean.dtype) - else: - # skip updating mean and leave it as zeros when the input is empty - batch_flag = torch.zeros([1], device=mean.device, dtype=mean.dtype) - - # synchronize mean and the batch flag - vec = torch.cat([mean, batch_flag]) - if self.stats_mode == 'N': - vec *= batch_size - if self.group_size > 1: - dist.all_reduce(vec, group=self.group) - total_batch = vec[-1].detach() - mean = vec[:num_channels] - - if self.stats_mode == 'default': - mean = mean / self.group_size - elif self.stats_mode == 'N': - mean = mean / total_batch.clamp(min=1) - else: - raise NotImplementedError - - # leave var as zeros when the input is empty - if batch_size > 0: - ext_module.sync_bn_forward_var(input3d, mean, var) - - if self.stats_mode == 'N': - var *= batch_size - if self.group_size > 1: - dist.all_reduce(var, group=self.group) - - if self.stats_mode == 'default': - var /= self.group_size - elif self.stats_mode == 'N': - var /= 
total_batch.clamp(min=1) - else: - raise NotImplementedError - - # if the total batch size over all the ranks is zero, - # we should not update the statistics in the current batch - update_flag = total_batch.clamp(max=1) - momentum = update_flag * self.momentum - ext_module.sync_bn_forward_output( - input3d, - mean, - var, - weight, - bias, - running_mean, - running_var, - norm, - std, - output3d, - eps=self.eps, - momentum=momentum, - group_size=self.group_size) - self.save_for_backward(norm, std, weight) - return output - - @staticmethod - @once_differentiable - def backward(self, grad_output): - norm, std, weight = self.saved_tensors - grad_weight = torch.zeros_like(weight) - grad_bias = torch.zeros_like(weight) - grad_input = torch.zeros_like(grad_output) - grad_output3d = grad_output.flatten(start_dim=2) - grad_input3d = grad_input.view_as(grad_output3d) - - batch_size = grad_input3d.size(0) - if batch_size > 0: - ext_module.sync_bn_backward_param(grad_output3d, norm, grad_weight, - grad_bias) - - # all reduce - if self.group_size > 1: - dist.all_reduce(grad_weight, group=self.group) - dist.all_reduce(grad_bias, group=self.group) - grad_weight /= self.group_size - grad_bias /= self.group_size - - if batch_size > 0: - ext_module.sync_bn_backward_data(grad_output3d, weight, - grad_weight, grad_bias, norm, std, - grad_input3d) - - return grad_input, None, None, grad_weight, grad_bias, \ - None, None, None, None, None - - -@NORM_LAYERS.register_module(name='MMSyncBN') -class SyncBatchNorm(Module): - """Synchronized Batch Normalization. - - Args: - num_features (int): number of features/chennels in input tensor - eps (float, optional): a value added to the denominator for numerical - stability. Defaults to 1e-5. - momentum (float, optional): the value used for the running_mean and - running_var computation. Defaults to 0.1. - affine (bool, optional): whether to use learnable affine parameters. - Defaults to True. - track_running_stats (bool, optional): whether to track the running - mean and variance during training. When set to False, this - module does not track such statistics, and initializes statistics - buffers ``running_mean`` and ``running_var`` as ``None``. When - these buffers are ``None``, this module always uses batch - statistics in both training and eval modes. Defaults to True. - group (int, optional): synchronization of stats happen within - each process group individually. By default it is synchronization - across the whole world. Defaults to None. - stats_mode (str, optional): The statistical mode. Available options - includes ``'default'`` and ``'N'``. Defaults to 'default'. - When ``stats_mode=='default'``, it computes the overall statistics - using those from each worker with equal weight, i.e., the - statistics are synchronized and simply divied by ``group``. This - mode will produce inaccurate statistics when empty tensors occur. - When ``stats_mode=='N'``, it compute the overall statistics using - the total number of batches in each worker ignoring the number of - group, i.e., the statistics are synchronized and then divied by - the total batch ``N``. This mode is beneficial when empty tensors - occur during training, as it average the total mean by the real - number of batch. 
- """ - - def __init__(self, - num_features, - eps=1e-5, - momentum=0.1, - affine=True, - track_running_stats=True, - group=None, - stats_mode='default'): - super(SyncBatchNorm, self).__init__() - self.num_features = num_features - self.eps = eps - self.momentum = momentum - self.affine = affine - self.track_running_stats = track_running_stats - group = dist.group.WORLD if group is None else group - self.group = group - self.group_size = dist.get_world_size(group) - assert stats_mode in ['default', 'N'], \ - f'"stats_mode" only accepts "default" and "N", got "{stats_mode}"' - self.stats_mode = stats_mode - if self.affine: - self.weight = Parameter(torch.Tensor(num_features)) - self.bias = Parameter(torch.Tensor(num_features)) - else: - self.register_parameter('weight', None) - self.register_parameter('bias', None) - if self.track_running_stats: - self.register_buffer('running_mean', torch.zeros(num_features)) - self.register_buffer('running_var', torch.ones(num_features)) - self.register_buffer('num_batches_tracked', - torch.tensor(0, dtype=torch.long)) - else: - self.register_buffer('running_mean', None) - self.register_buffer('running_var', None) - self.register_buffer('num_batches_tracked', None) - self.reset_parameters() - - def reset_running_stats(self): - if self.track_running_stats: - self.running_mean.zero_() - self.running_var.fill_(1) - self.num_batches_tracked.zero_() - - def reset_parameters(self): - self.reset_running_stats() - if self.affine: - self.weight.data.uniform_() # pytorch use ones_() - self.bias.data.zero_() - - def forward(self, input): - if input.dim() < 2: - raise ValueError( - f'expected at least 2D input, got {input.dim()}D input') - if self.momentum is None: - exponential_average_factor = 0.0 - else: - exponential_average_factor = self.momentum - - if self.training and self.track_running_stats: - if self.num_batches_tracked is not None: - self.num_batches_tracked += 1 - if self.momentum is None: # use cumulative moving average - exponential_average_factor = 1.0 / float( - self.num_batches_tracked) - else: # use exponential moving average - exponential_average_factor = self.momentum - - if self.training or not self.track_running_stats: - return SyncBatchNormFunction.apply( - input, self.running_mean, self.running_var, self.weight, - self.bias, exponential_average_factor, self.eps, self.group, - self.group_size, self.stats_mode) - else: - return F.batch_norm(input, self.running_mean, self.running_var, - self.weight, self.bias, False, - exponential_average_factor, self.eps) - - def __repr__(self): - s = self.__class__.__name__ - s += f'({self.num_features}, ' - s += f'eps={self.eps}, ' - s += f'momentum={self.momentum}, ' - s += f'affine={self.affine}, ' - s += f'track_running_stats={self.track_running_stats}, ' - s += f'group_size={self.group_size},' - s += f'stats_mode={self.stats_mode})' - return s diff --git a/spaces/daddyjin/TalkingFaceGeneration/FONT/logger.py b/spaces/daddyjin/TalkingFaceGeneration/FONT/logger.py deleted file mode 100644 index 9810ebcf938fa75cca17db79d48753944b928b84..0000000000000000000000000000000000000000 --- a/spaces/daddyjin/TalkingFaceGeneration/FONT/logger.py +++ /dev/null @@ -1,225 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -import imageio - -import os -from skimage.draw import circle - -import matplotlib.pyplot as plt -import collections - - -class Logger: - def __init__(self, log_dir, checkpoint_freq=50, visualizer_params=None, zfill_num=8, log_file_name='log.txt'): - - self.loss_list = [] - 
self.cpk_dir = log_dir - self.visualizations_dir = os.path.join(log_dir, 'train-vis') - if not os.path.exists(self.visualizations_dir): - os.makedirs(self.visualizations_dir) - self.log_file = open(os.path.join(log_dir, log_file_name), 'a') - self.zfill_num = zfill_num - self.visualizer = Visualizer(**visualizer_params) - self.checkpoint_freq = checkpoint_freq - self.epoch = 0 - self.best_loss = float('inf') - self.names = None - - def log_scores(self, loss_names): - loss_mean = np.array(self.loss_list).mean(axis=0) - - loss_string = "; ".join(["%s - %.5f" % (name, value) for name, value in zip(loss_names, loss_mean)]) - loss_string = str(str(self.epoch)+str(self.step).zfill(self.zfill_num)) + ") " + loss_string - - print(loss_string, file=self.log_file) - self.loss_list = [] - self.log_file.flush() - - def visualize_rec(self, inp, out): - # image = self.visualizer.visualize(inp['driving'], inp['source'], out) - image = self.visualizer.visualize(inp['driving'][:,-1], inp['transformed_driving'][:,-1], inp['example_image'], out) - imageio.imsave(os.path.join(self.visualizations_dir, "%s-%s-rec.png" % (str(self.epoch),str(self.step).zfill(self.zfill_num))), image) - - def save_cpk(self, emergent=False): - cpk = {k: v.state_dict() for k, v in self.models.items()} - cpk['epoch'] = self.epoch - cpk['step'] = self.step - cpk_path = os.path.join(self.cpk_dir, '%s-%s-checkpoint.pth.tar' % (str(self.epoch),str(self.step).zfill(self.zfill_num))) - if not (os.path.exists(cpk_path) and emergent): - torch.save(cpk, cpk_path) - - @staticmethod - def load_cpk(checkpoint_path, generator=None, discriminator=None, kp_detector=None, audio_feature=None, - optimizer_generator=None, optimizer_discriminator=None, optimizer_kp_detector=None, optimizer_audio_feature = None): - checkpoint = torch.load(checkpoint_path) - if generator is not None: - generator.load_state_dict(checkpoint['generator']) - if kp_detector is not None: - kp_detector.load_state_dict(checkpoint['kp_detector']) - if discriminator is not None: - try: - discriminator.load_state_dict(checkpoint['discriminator']) - except: - print ('No discriminator in the state-dict. Discriminator will be randomly initialized') - # if audio_feature is not None: - # audio_feature.load_state_dict(checkpoint['audio_feature']) - if optimizer_generator is not None: - optimizer_generator.load_state_dict(checkpoint['optimizer_generator']) - if optimizer_discriminator is not None: - try: - optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator']) - except RuntimeError as e: - print ('No discriminator optimizer in the state-dict.
Optimizer will not be initialized') - if optimizer_kp_detector is not None: - optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector']) - # if optimizer_audio_feature is not None: - # a = checkpoint['optimizer_kp_detector']['param_groups'] - # a[0].pop('params') - # optimizer_audio_feature.load_state_dict(checkpoint['optimizer_audio_feature']) - - return checkpoint['epoch'] - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if 'models' in self.__dict__: - self.save_cpk() - self.log_file.close() - - def log_iter(self, losses): - losses = collections.OrderedDict(losses.items()) - if self.names is None: - self.names = list(losses.keys()) - self.loss_list.append(list(losses.values())) - - def log_epoch(self, epoch, step, models, inp, out): - self.epoch = epoch - self.step = step - self.models = models - if (self.epoch + 1) % self.checkpoint_freq == 0: - self.save_cpk() - self.log_scores(self.names) - self.visualize_rec(inp, out) - - -class Visualizer: - def __init__(self, kp_size=5, draw_border=False, colormap='gist_rainbow'): - self.kp_size = kp_size - self.draw_border = draw_border - self.colormap = plt.get_cmap(colormap) - - def draw_image_with_kp(self, image, kp_array): - image = np.copy(image) - spatial_size = np.array(image.shape[:2][::-1])[np.newaxis] - kp_array = spatial_size * (kp_array + 1) / 2 - num_kp = kp_array.shape[0] - for kp_ind, kp in enumerate(kp_array): - rr, cc = circle(kp[1], kp[0], self.kp_size, shape=image.shape[:2]) - image[rr, cc] = np.array(self.colormap(kp_ind / num_kp))[:3] - return image - - def create_image_column_with_kp(self, images, kp): - image_array = np.array([self.draw_image_with_kp(v, k) for v, k in zip(images, kp)]) - return self.create_image_column(image_array) - - def create_image_column(self, images): - if self.draw_border: - images = np.copy(images) - images[:, :, [0, -1]] = (1, 1, 1) - images[:, :, [0, -1]] = (1, 1, 1) - return np.concatenate(list(images), axis=0) - - def create_image_grid(self, *args): - out = [] - for arg in args: - if type(arg) == tuple: - out.append(self.create_image_column_with_kp(arg[0], arg[1])) - else: - out.append(self.create_image_column(arg)) - return np.concatenate(out, axis=1) - - def visualize(self, driving, transformed_driving, source, out): - images = [] - - - # Source image with keypoints - source = source.data.cpu() - kp_source = out['kp_source']['value'].data.cpu().numpy() - source = np.transpose(source, [0, 2, 3, 1]) - images.append((source, kp_source)) - - # Equivariance visualization - if 'transformed_frame' in out: - transformed = out['transformed_frame'].data.cpu().numpy() - transformed = np.transpose(transformed, [0, 2, 3, 1]) - transformed_kp = out['transformed_kp']['value'].data.cpu().numpy() - images.append((transformed, transformed_kp)) - - # Equivariance visualization - transformed_driving = transformed_driving.data.cpu().numpy() - transformed_driving = np.transpose(transformed_driving, [0, 2, 3, 1]) - images.append(transformed_driving) - - # Driving image with keypoints - kp_driving = out['kp_driving'][-1]['value'].data.cpu().numpy() #[-1]['value'] - driving = driving.data.cpu().numpy() - driving = np.transpose(driving, [0, 2, 3, 1]) - images.append((driving, kp_driving)) - - - - # Deformed image - if 'deformed' in out: - deformed = out['deformed'].data.cpu().numpy() - deformed = np.transpose(deformed, [0, 2, 3, 1]) - images.append(deformed) - - # Result with and without keypoints - prediction = out['prediction'].data.cpu().numpy() - prediction
= np.transpose(prediction, [0, 2, 3, 1]) - if 'kp_norm' in out: - kp_norm = out['kp_norm']['value'].data.cpu().numpy() - images.append((prediction, kp_norm)) - images.append(prediction) - - - ## Occlusion map - if 'occlusion_map' in out: - occlusion_map = out['occlusion_map'].data.cpu().repeat(1, 3, 1, 1) - occlusion_map = F.interpolate(occlusion_map, size=source.shape[1:3]).numpy() - occlusion_map = np.transpose(occlusion_map, [0, 2, 3, 1]) - images.append(occlusion_map) - - # Deformed images according to each individual transform - if 'sparse_deformed' in out: - full_mask = [] - for i in range(out['sparse_deformed'].shape[1]): - image = out['sparse_deformed'][:, i].data.cpu() - image = F.interpolate(image, size=source.shape[1:3]) - mask = out['mask'][:, i:(i+1)].data.cpu().repeat(1, 3, 1, 1) - mask = F.interpolate(mask, size=source.shape[1:3]) - image = np.transpose(image.numpy(), (0, 2, 3, 1)) - mask = np.transpose(mask.numpy(), (0, 2, 3, 1)) - - if i != 0: - color = np.array(self.colormap((i - 1) / (out['sparse_deformed'].shape[1] - 1)))[:3] - else: - color = np.array((0, 0, 0)) - - color = color.reshape((1, 1, 1, 3)) - - images.append(image) - if i != 0: - images.append(mask * color) - else: - images.append(mask) - - full_mask.append(mask * color) - - images.append(sum(full_mask)) - - image = self.create_image_grid(*images) - image = (255 * image).astype(np.uint8) - return image diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/deploy_space.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/deploy_space.py deleted file mode 100644 index 9014b4e24ea2987d05dcf6ad58a6f0ee437646de..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/deploy_space.py +++ /dev/null @@ -1,175 +0,0 @@ -from __future__ import annotations - -import argparse -import os -import re - -import huggingface_hub - -import gradio as gr - -repo_directory = os.getcwd() -readme_file = os.path.join(repo_directory, "README.md") -github_action_template = os.path.join( - os.path.dirname(__file__), "deploy_space_action.yaml" -) - - -def add_configuration_to_readme( - title: str | None, - app_file: str | None, -) -> dict: - configuration = {} - - dir_name = os.path.basename(repo_directory) - if title is None: - title = input(f"Enter Spaces app title [{dir_name}]: ") or dir_name - formatted_title = format_title(title) - if formatted_title != title: - print(f"Formatted to {formatted_title}. 
") - configuration["title"] = formatted_title - - if app_file is None: - for file in os.listdir(repo_directory): - file_path = os.path.join(repo_directory, file) - if not os.path.isfile(file_path) or not file.endswith(".py"): - continue - - with open(file_path, encoding="utf-8", errors="ignore") as f: - content = f.read() - if "import gradio" in content: - app_file = file - break - - app_file = ( - input(f"Enter Gradio app file {f'[{app_file}]' if app_file else ''}: ") - or app_file - ) - if not app_file or not os.path.exists(app_file): - raise FileNotFoundError("Failed to find Gradio app file.") - configuration["app_file"] = app_file - - configuration["sdk"] = "gradio" - configuration["sdk_version"] = gr.__version__ - huggingface_hub.metadata_save(readme_file, configuration) - - configuration["hardware"] = ( - input( - f"Enter Spaces hardware ({', '.join(hardware.value for hardware in huggingface_hub.SpaceHardware)}) [cpu-basic]: " - ) - or "cpu-basic" - ) - - secrets = {} - if input("Any Spaces secrets (y/n) [n]: ") == "y": - while True: - secret_name = input("Enter secret name (leave blank to end): ") - if not secret_name: - break - secret_value = input(f"Enter secret value for {secret_name}: ") - secrets[secret_name] = secret_value - configuration["secrets"] = secrets - - requirements_file = os.path.join(repo_directory, "requirements.txt") - if ( - not os.path.exists(requirements_file) - and input("Create requirements.txt file? (y/n) [n]: ").lower() == "y" - ): - while True: - requirement = input("Enter a dependency (leave blank to end): ") - if not requirement: - break - with open(requirements_file, "a") as f: - f.write(requirement + "\n") - - if ( - input( - "Create Github Action to automatically update Space on 'git push'? [n]: " - ).lower() - == "y" - ): - track_branch = input("Enter branch to track [main]: ") or "main" - github_action_file = os.path.join( - repo_directory, ".github/workflows/update_space.yml" - ) - os.makedirs(os.path.dirname(github_action_file), exist_ok=True) - with open(github_action_template) as f: - github_action_content = f.read() - github_action_content = github_action_content.replace("$branch", track_branch) - with open(github_action_file, "w") as f: - f.write(github_action_content) - - print( - "Github Action created. Add your Hugging Face write token (from https://huggingface.co/settings/tokens) as an Actions Secret named 'hf_token' to your GitHub repository. This can be set in your repository's settings page." 
- ) - - return configuration - - -def format_title(title: str): - title = title.replace(" ", "_") - title = re.sub(r"[^a-zA-Z0-9\-._]", "", title) - title = re.sub("-+", "-", title) - while title.startswith("."): - title = title[1:] - return title - - -def deploy(): - if ( - os.getenv("SYSTEM") == "spaces" - ): # in case a repo with this function is uploaded to spaces - return - parser = argparse.ArgumentParser(description="Deploy to Spaces") - parser.add_argument("deploy") - parser.add_argument("--title", type=str, help="Spaces app title") - parser.add_argument("--app-file", type=str, help="File containing the Gradio app") - - args = parser.parse_args() - - hf_api = huggingface_hub.HfApi() - whoami = None - login = False - try: - whoami = hf_api.whoami() - if whoami["auth"]["accessToken"]["role"] != "write": - login = True - except OSError: - login = True - if login: - print("Need 'write' access token to create a Spaces repo.") - huggingface_hub.login(add_to_git_credential=False) - whoami = hf_api.whoami() - - configuration: None | dict = None - if os.path.exists(readme_file): - try: - configuration = huggingface_hub.metadata_load(readme_file) - except ValueError: - pass - - if configuration is None: - print( - f"Creating new Spaces Repo in '{repo_directory}'. Collecting metadata, press Enter to accept default value." - ) - configuration = add_configuration_to_readme( - args.title, - args.app_file, - ) - - space_id = huggingface_hub.create_repo( - configuration["title"], - space_sdk="gradio", - repo_type="space", - exist_ok=True, - space_hardware=configuration.get("hardware"), - ).repo_id - hf_api.upload_folder( - repo_id=space_id, - repo_type="space", - folder_path=repo_directory, - ) - if configuration.get("secrets"): - for secret_name, secret_value in configuration["secrets"].items(): - huggingface_hub.add_space_secret(space_id, secret_name, secret_value) - print(f"Space available at https://huggingface.co/spaces/{space_id}") diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Player-1e00f554.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Player-1e00f554.css deleted file mode 100644 index a9fd7de561508b5989c623a98722cb397f7fd885..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Player-1e00f554.css +++ /dev/null @@ -1 +0,0 @@ -span.svelte-w5wajl.svelte-w5wajl{text-shadow:0 0 8px rgba(0,0,0,.5)}progress.svelte-w5wajl.svelte-w5wajl{margin-right:var(--size-3);border-radius:var(--radius-sm);width:var(--size-full);height:var(--size-2)}progress.svelte-w5wajl.svelte-w5wajl::-webkit-progress-bar{border-radius:2px;background-color:#fff3;overflow:hidden}progress.svelte-w5wajl.svelte-w5wajl::-webkit-progress-value{background-color:#ffffffe6}video.svelte-w5wajl.svelte-w5wajl{position:inherit;background-color:#000;width:var(--size-full);height:var(--size-full);object-fit:contain}.mirror.svelte-w5wajl.svelte-w5wajl{transform:scaleX(-1)}.controls.svelte-w5wajl.svelte-w5wajl{position:absolute;bottom:0;opacity:0;transition:.5s;margin:var(--size-2);border-radius:var(--radius-md);background:var(--color-grey-800);padding:var(--size-2) var(--size-1);width:calc(100% - .75rem);width:calc(100% - var(--size-2) * 2)}.wrap.svelte-w5wajl:hover 
.controls.svelte-w5wajl{opacity:1}.inner.svelte-w5wajl.svelte-w5wajl{display:flex;justify-content:space-between;align-items:center;padding-right:var(--size-2);padding-left:var(--size-2);width:var(--size-full);height:var(--size-full)}.icon.svelte-w5wajl.svelte-w5wajl{display:flex;justify-content:center;cursor:pointer;width:var(--size-6);color:#fff}.time.svelte-w5wajl.svelte-w5wajl{flex-shrink:0;margin-right:var(--size-3);margin-left:var(--size-3);color:#fff;font-size:var(--text-sm);font-family:var(--font-mono)}.wrap.svelte-w5wajl.svelte-w5wajl{position:relative;background-color:var(--background-fill-secondary);height:var(--size-full);width:var(--size-full)} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-d225313c.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-d225313c.js deleted file mode 100644 index dda913fd73b1b305dbb415c6878b93a7de7e193e..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-d225313c.js +++ /dev/null @@ -1,2 +0,0 @@ -import{T as s}from"./index-ea4a5a13.js";import"./index-39fce9e2.js";import"./Button-79f6e3bf.js";import"./BlockTitle-fa702e63.js";import"./Info-7c1e7874.js";import"./Copy-77b3f70c.js";const i=["static","dynamic"];export{s as Component,i as modes}; -//# sourceMappingURL=index-d225313c.js.map diff --git a/spaces/demongaara/Gaara-pokemon-stable-diffusion/app.py b/spaces/demongaara/Gaara-pokemon-stable-diffusion/app.py deleted file mode 100644 index a43dd97a9abd15193b068ee3802c89db7aa7cb81..0000000000000000000000000000000000000000 --- a/spaces/demongaara/Gaara-pokemon-stable-diffusion/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/justinpinkney/pokemon-stable-diffusion").launch() \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/How To Enter Unlock Code Huawei Fc312e.md b/spaces/diacanFperku/AutoGPT/How To Enter Unlock Code Huawei Fc312e.md deleted file mode 100644 index cf817c7b848256b176d1cbe6c54c45f8cba27114..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/How To Enter Unlock Code Huawei Fc312e.md +++ /dev/null @@ -1,6 +0,0 @@ -

      How to enter unlock code huawei fc312e


DOWNLOAD >>> https://gohhs.com/2uFT7A



- -Go to the Old Algo Code Calculator tool, enter your IMEI number in the IMEI box, click Calculate, and check the code. Unlocking Huawei e5577c by ...
      -
      -
      -

      diff --git a/spaces/diacanFperku/AutoGPT/Intericad T6 Full Cracked Part 1.iso NEW.md b/spaces/diacanFperku/AutoGPT/Intericad T6 Full Cracked Part 1.iso NEW.md deleted file mode 100644 index 64a6d8eef2cb4c24035453d21a090e3934bb684f..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Intericad T6 Full Cracked Part 1.iso NEW.md +++ /dev/null @@ -1,128 +0,0 @@ -
      -

      InteriCAD T6 Full Cracked Part 1.iso: A Review of the Software and Its Features

      - -

      If you are looking for a powerful and easy-to-use interior design software, you might want to check out InteriCAD T6 Full Cracked Part 1.iso. This is a downloadable file that contains the first part of the full version of InteriCAD T6, a professional software that allows you to create stunning 3D renderings, animations, floor plans, and VR presentations of your interior design projects.

      -

      intericad t6 full cracked part 1.iso


      Download Zip ☆☆☆ https://gohhs.com/2uFV8L



      - -

      In this article, we will review the software and its features, and show you how to download and install InteriCAD T6 Full Cracked Part 1.iso on your computer.

      - -

      What is InteriCAD T6?

      - -

InteriCAD T6 is a program developed by YFCAD, a leading company in the field of interior design software. InteriCAD T6 is designed to help interior designers, architects, and decorators create realistic and impressive visualizations of their design ideas.

      - -

      InteriCAD T6 has several features that make it stand out from other software in the market. Some of these features are:

      -

      - -
        -
      • A user-friendly interface that allows you to easily access all the tools and functions you need.
      • -
      • A large library of furniture, materials, accessories, lighting, and textures that you can drag and drop into your design.
      • -
      • A smart design system that automatically generates floor plans, elevations, and dimensions based on your 3D model.
      • -
      • A fast rendering engine that produces high-quality images and animations in minutes.
      • -
      • A VR function that lets you experience your design in virtual reality using a VR headset or a smartphone.
      • -
      - -

      With InteriCAD T6, you can create stunning presentations for your clients and showcase your design skills in a professional way.

      - -

      How to Download and Install InteriCAD T6 Full Cracked Part 1.iso?

      - -

      If you want to try out InteriCAD T6 for free, you can download InteriCAD T6 Full Cracked Part 1.iso from the link below. This file contains the first part of the full version of InteriCAD T6, which includes all the features and functions of the software. You will need to download another file (InteriCAD T6 Full Cracked Part 2.iso) to complete the installation.

      - -

      To download and install InteriCAD T6 Full Cracked Part 1.iso, follow these steps:

      - -
        -
      1. Click on the link below to download InteriCAD T6 Full Cracked Part 1.iso.
      2. -
      3. Save the file on your computer and extract it using a program like WinRAR or 7-Zip.
      4. -
      5. Open the extracted folder and run the setup.exe file.
      6. -
      7. Follow the instructions on the screen to install InteriCAD T6 on your computer.
      8. -
      9. When the installation is finished, do not run the software yet. You will need to download InteriCAD T6 Full Cracked Part 2.iso to activate the software.
      10. -
      - -

      Download link: https://blltly.com/2ty9kW

      - -

      Conclusion

      - -

      InteriCAD T6 is a professional interior design software that can help you create amazing 3D renderings, animations, floor plans, and VR presentations of your design projects. You can download InteriCAD T6 Full Cracked Part 1.iso from the link above and install it on your computer for free. However, you will need to download another file (InteriCAD T6 Full Cracked Part 2.iso) to complete the installation and activate the software.

      - -

      If you have any questions or comments about InteriCAD T6 or InteriCAD T6 Full Cracked Part 1.iso, feel free to leave them below. We hope you enjoy using InteriCAD T6 and create stunning designs with it.

      -

      How to Use InteriCAD T6 Full Cracked Part 1.iso for Your Interior Design Projects?

      - -

      Now that you have downloaded and installed InteriCAD T6 Full Cracked Part 1.iso, you might be wondering how to use it for your interior design projects. In this section, we will show you some tips and tricks on how to use InteriCAD T6 to create amazing designs and presentations.

      - -

      Here are some steps you can follow to use InteriCAD T6 Full Cracked Part 1.iso:

      - -
        -
      1. Launch InteriCAD T6 from your desktop or start menu.
      2. -
      3. Select a project type from the main menu. You can choose from residential, commercial, or landscape projects.
      4. -
      5. Choose a template or create a new project from scratch. You can also import your own CAD files or floor plans.
      6. -
      7. Use the tools and functions on the left panel to design your space. You can add walls, doors, windows, floors, ceilings, stairs, columns, beams, etc.
      8. -
      9. Use the library on the right panel to furnish and decorate your space. You can drag and drop furniture, materials, accessories, lighting, and textures into your design. You can also customize the size, color, shape, and orientation of the items.
      10. -
      11. Use the smart design system to generate floor plans, elevations, and dimensions automatically based on your 3D model.
      12. -
      13. Use the rendering engine to produce high-quality images and animations of your design. You can adjust the camera angle, lighting, shadow, reflection, etc.
      14. -
      15. Use the VR function to experience your design in virtual reality. You can use a VR headset or a smartphone to view your design in 360 degrees.
      16. -
      17. Save and export your project as a file or a presentation. You can also share your project online or print it out.
      18. -
      - -

      With InteriCAD T6 Full Cracked Part 1.iso, you can create stunning interior design projects in a fast and easy way. You can impress your clients and showcase your design skills with InteriCAD T6.

      - -

      Conclusion

      - -

      In this article, we have reviewed InteriCAD T6 Full Cracked Part 1.iso, a professional interior design software that allows you to create realistic and impressive visualizations of your design ideas. We have shown you how to download and install InteriCAD T6 Full Cracked Part 1.iso on your computer for free. We have also shown you how to use InteriCAD T6 Full Cracked Part 1.iso for your interior design projects.

      - -

      If you have any questions or comments about InteriCAD T6 or InteriCAD T6 Full Cracked Part 1.iso, feel free to leave them below. We hope you enjoy using InteriCAD T6 and create stunning designs with it.

      -

      What are the Benefits of Using InteriCAD T6 Full Cracked Part 1.iso?

      - -

      Using InteriCAD T6 Full Cracked Part 1.iso can bring you many benefits as an interior designer, architect, or decorator. Here are some of the benefits you can enjoy by using InteriCAD T6 Full Cracked Part 1.iso:

      - -
        -
      • You can save time and money by using InteriCAD T6 Full Cracked Part 1.iso. You don't need to buy expensive software licenses or hardware upgrades to use InteriCAD T6. You can also work faster and more efficiently by using the smart design system and the fast rendering engine.
      • -
      • You can improve your design skills and creativity by using InteriCAD T6 Full Cracked Part 1.iso. You can learn from the large library of furniture, materials, accessories, lighting, and textures that are available in InteriCAD T6. You can also experiment with different styles, colors, shapes, and layouts to create unique and original designs.
      • -
      • You can impress your clients and increase your reputation by using InteriCAD T6 Full Cracked Part 1.iso. You can create realistic and impressive visualizations of your design ideas that will wow your clients and potential customers. You can also present your designs in a professional way by using the 3D animations, floor plans, and VR presentations.
      • -
      - -

      Using InteriCAD T6 Full Cracked Part 1.iso can help you achieve your goals and dreams as an interior designer, architect, or decorator. You can create stunning interior design projects that will make you proud and happy.

      - -

      Where to Find More Information About InteriCAD T6 Full Cracked Part 1.iso?

      - -

      If you want to find more information about InteriCAD T6 Full Cracked Part 1.iso, you can visit the following websites:

      - -
        -
      • The official website of YFCAD, the developer of InteriCAD T6. You can find more details about the software and its features, as well as tutorials, videos, FAQs, and customer support. The website is https://www.yfcad.com/intericad-t6/.
      • -
      • The official YouTube channel of YFCAD, where you can watch videos of InteriCAD T6 projects, tips, tricks, and demos. The channel is https://www.youtube.com/user/YFCAD.
      • -
      • The official Facebook page of YFCAD, where you can follow the latest news and updates about InteriCAD T6, as well as interact with other users and fans. The page is https://www.facebook.com/YFCAD.
      • -
      - -

      These websites can help you learn more about InteriCAD T6 Full Cracked Part 1.iso and how to use it for your interior design projects.

      - -

      Conclusion

      - -

      In this article, we have reviewed InteriCAD T6 Full Cracked Part 1.iso, a professional interior design software that allows you to create realistic and impressive visualizations of your design ideas. We have shown you how to download and install InteriCAD T6 Full Cracked Part 1.iso on your computer for free. We have also shown you how to use InteriCAD T6 Full Cracked Part 1.iso for your interior design projects. We have also shown you the benefits of using InteriCAD T6 Full Cracked Part 1.iso and where to find more information about it.

      - -

      If you have any questions or comments about InteriCAD T6 or InteriCAD T6 Full Cracked Part 1.iso, feel free to leave them below. We hope you enjoy using InteriCAD T6 and create stunning designs with it.

      -

      How to Download InteriCAD T6 Full Cracked Part 2.iso?

      - -

      In the previous sections, we have shown you how to download and install InteriCAD T6 Full Cracked Part 1.iso on your computer for free. However, you will need to download another file (InteriCAD T6 Full Cracked Part 2.iso) to complete the installation and activate the software.

      - -

      InteriCAD T6 Full Cracked Part 2.iso is a downloadable file that contains the second part of the full version of InteriCAD T6, which includes the keygen and the activator for the software. You will need to use these tools to generate a serial number and a license code for InteriCAD T6.

      - -

      To download and install InteriCAD T6 Full Cracked Part 2.iso, follow these steps:

      - -
        -
      1. Click on the link below to download InteriCAD T6 Full Cracked Part 2.iso.
      2. -
      3. Save the file on your computer and extract it using a program like WinRAR or 7-Zip.
      4. -
      5. Open the extracted folder and run the keygen.exe file.
      6. -
      7. Copy the serial number that appears on the screen.
      8. -
      9. Go back to InteriCAD T6 and enter the serial number when prompted.
      10. -
      11. Run the activator.exe file and copy the license code that appears on the screen.
      12. -
      13. Go back to InteriCAD T6 and enter the license code when prompted.
      14. -
      15. Restart InteriCAD T6 and enjoy the full version of the software.
      16. -
      - -

      Download link: https://blltly.com/2ty9kW

      - -

      Conclusion

      - -

      In this article, we have reviewed InteriCAD T6 Full Cracked Part 1.iso, a professional interior design software that allows you to create realistic and impressive visualizations of your design ideas. We have shown you how to download and install InteriCAD T6 Full Cracked Part 1.iso on your computer for free. We have also shown you how to use InteriCAD T6 Full Cracked Part 1.iso for your interior design projects. We have also shown you the benefits of using InteriCAD T6 Full Cracked Part 1.iso and where to find more information about it. Finally, we have shown you how to download and install InteriCAD T6 Full Cracked Part 2.iso to complete the installation and activate the software.

      - -

      If you have any questions or comments about InteriCAD T6 or InteriCAD T6 Full Cracked Part 1.iso or InteriCAD T6 Full Cracked Part 2.iso, feel free to leave them below. We hope you enjoy using InteriCAD T6 and create stunning designs with it.

      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Microsoft Windows Small Business Server 2011 Iso Download.md b/spaces/diacanFperku/AutoGPT/Microsoft Windows Small Business Server 2011 Iso Download.md deleted file mode 100644 index dab94fc580acee3f029eca1263e370adf1058038..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Microsoft Windows Small Business Server 2011 Iso Download.md +++ /dev/null @@ -1,27 +0,0 @@ - -

      How to Download and Install Microsoft Windows Small Business Server 2011 Standard

      -

      If you are looking for a reliable and affordable server solution for your small business, you might want to consider Microsoft Windows Small Business Server 2011 Standard. This is a version of Windows Server 2008 R2 that is designed specifically for small businesses with up to 75 users. It includes features such as Exchange Server 2010, SharePoint Foundation 2010, Windows Server Update Services, and Remote Web Access.

      -

      microsoft windows small business server 2011 iso download


      Download ✶✶✶ https://gohhs.com/2uFVgu



      -

      However, finding the ISO file for this product can be challenging, as Microsoft has discontinued its support and distribution. In this article, we will show you how to download and install Microsoft Windows Small Business Server 2011 Standard from a trusted source.

      -

      Step 1: Get a valid product key

      -

      Before you can download and install Microsoft Windows Small Business Server 2011 Standard, you will need a valid product key. This is a 25-digit code that proves that you have purchased a legitimate copy of the software. You can find your product key on the original packaging of the product, on a sticker attached to your server, or in an email confirmation from Microsoft or your reseller.

      -

      If you have lost or misplaced your product key, you can try to contact Microsoft or your reseller for assistance. However, they may not be able to provide you with a replacement key, especially if the product is no longer supported. In that case, you may need to purchase a new license or upgrade to a newer version of Windows Server.

      -

      Step 2: Download the ISO file

      -

      Once you have your product key, you can proceed to download the ISO file for Microsoft Windows Small Business Server 2011 Standard. An ISO file is a single file that contains all the data and files needed to install the software. You can burn it to a DVD or mount it to a virtual drive using a tool such as Daemon Tools.

      -

      There are several sources where you can download the ISO file for Microsoft Windows Small Business Server 2011 Standard. However, not all of them are reliable or safe. Some may contain viruses, malware, or corrupted files that can harm your server or compromise your data. Therefore, you should only download the ISO file from a trusted source.

      -

      -

      One of the trusted sources where you can download the ISO file for Microsoft Windows Small Business Server 2011 Standard is MSDN (Microsoft Developer Network). This is a website where developers and IT professionals can access various Microsoft products and resources. However, you will need an MSDN subscription to access the ISO file. You can sign up for an MSDN subscription here: https://msdn.microsoft.com/en-us/subscriptions/aa948868.aspx

      -

      Another trusted source where you can download the ISO file for Microsoft Windows Small Business Server 2011 Standard is Dell Community. This is a forum where Dell customers and experts can share their knowledge and experience with Dell products and services. You can find a link to download the ISO file here: https://www.dell.com/community/PowerEdge-Hardware-General/Where-can-I-download-Windows-Small-Business-Server-2011-Standard/td-p/7644124

      -

      Alternatively, you can also download the ISO file from Archive.org. This is a website that preserves and provides access to various digital content and media. You can find a link to download the ISO file here: https://archive.org/details/windows_small_business_server_2011_standard_x64

      -

      Step 3: Install the software

      -

      After you have downloaded the ISO file for Microsoft Windows Small Business Server 2011 Standard, you can install it on your server. You will need a DVD drive or a virtual drive to mount the ISO file. You will also need to backup your data and settings before installing the software.

      -

      To install Microsoft Windows Small Business Server 2011 Standard, follow these steps:

      -
        -
      1. Insert the DVD or mount the ISO file to your server.
      2. -
      3. Restart your server and boot from the DVD or virtual drive.
      4. -
      5. Follow the on-screen instructions to select your language, time zone, and keyboard layout.
      6. -
      7. Enter your product key when prompted.
      8. -
      9. Accept the license agreement and click Next.
      10. -
      11. Select Custom (advanced) as the installation type.
12.
        -
        -
        \ No newline at end of file diff --git a/spaces/diagaiwei/ir_chinese_medqa/colbert/modeling/tokenization/utils.py b/spaces/diagaiwei/ir_chinese_medqa/colbert/modeling/tokenization/utils.py deleted file mode 100644 index 914a4f59d18b62efae9a7dc4c9f661a03c156de3..0000000000000000000000000000000000000000 --- a/spaces/diagaiwei/ir_chinese_medqa/colbert/modeling/tokenization/utils.py +++ /dev/null @@ -1,63 +0,0 @@ -import torch - - -def tensorize_triples(query_tokenizer, doc_tokenizer, queries, passages, scores, bsize, nway): - # assert len(passages) == len(scores) == bsize * nway - # assert bsize is None or len(queries) % bsize == 0 - - # N = len(queries) - Q_ids, Q_mask = query_tokenizer.tensorize(queries) - D_ids, D_mask = doc_tokenizer.tensorize(passages) - # D_ids, D_mask = D_ids.view(2, N, -1), D_mask.view(2, N, -1) - - # # Compute max among {length of i^th positive, length of i^th negative} for i \in N - # maxlens = D_mask.sum(-1).max(0).values - - # # Sort by maxlens - # indices = maxlens.sort().indices - # Q_ids, Q_mask = Q_ids[indices], Q_mask[indices] - # D_ids, D_mask = D_ids[:, indices], D_mask[:, indices] - - # (positive_ids, negative_ids), (positive_mask, negative_mask) = D_ids, D_mask - - query_batches = _split_into_batches(Q_ids, Q_mask, bsize) - doc_batches = _split_into_batches(D_ids, D_mask, bsize * nway) - # positive_batches = _split_into_batches(positive_ids, positive_mask, bsize) - # negative_batches = _split_into_batches(negative_ids, negative_mask, bsize) - - if len(scores): - score_batches = _split_into_batches2(scores, bsize * nway) - else: - score_batches = [[] for _ in doc_batches] - - batches = [] - for Q, D, S in zip(query_batches, doc_batches, score_batches): - batches.append((Q, D, S)) - - return batches - - -def _sort_by_length(ids, mask, bsize): - if ids.size(0) <= bsize: - return ids, mask, torch.arange(ids.size(0)) - - indices = mask.sum(-1).sort().indices - reverse_indices = indices.sort().indices - - return ids[indices], mask[indices], reverse_indices - - -def _split_into_batches(ids, mask, bsize): - batches = [] - for offset in range(0, ids.size(0), bsize): - batches.append((ids[offset:offset+bsize], mask[offset:offset+bsize])) - - return batches - - -def _split_into_batches2(scores, bsize): - batches = [] - for offset in range(0, len(scores), bsize): - batches.append(scores[offset:offset+bsize]) - - return batches diff --git a/spaces/digitalxingtong/Un-Bert-Vits2/README.md b/spaces/digitalxingtong/Un-Bert-Vits2/README.md deleted file mode 100644 index 9681e2b40f2bfe8dbec7cfd0e4401598bf02c6ca..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Un-Bert-Vits2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AI柚恩 -emoji: 🌟 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/text/english_bert_mock.py b/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/text/english_bert_mock.py deleted file mode 100644 index 3b894ced5b6d619a18d6bdd7d7606ba9e6532050..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/text/english_bert_mock.py +++ /dev/null @@ -1,5 +0,0 @@ -import torch - - -def get_bert_feature(norm_text, word2ph): - return torch.zeros(1024, sum(word2ph)) diff --git 
a/spaces/donjuanplatinum/code/README.md b/spaces/donjuanplatinum/code/README.md deleted file mode 100644 index f30a84cae819c351b9f2804266eefcd1ed5e7940..0000000000000000000000000000000000000000 --- a/spaces/donjuanplatinum/code/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Code -emoji: 🌖 -colorFrom: gray -colorTo: gray -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: gpl-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dory111111/babyagi-streamlit/README.md b/spaces/dory111111/babyagi-streamlit/README.md deleted file mode 100644 index 0cb6721eba59e4794834d0eeae150215cf0b9319..0000000000000000000000000000000000000000 --- a/spaces/dory111111/babyagi-streamlit/README.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Babyagi Streamlit -sdk: streamlit -emoji: 🚀 -colorFrom: gray -colorTo: gray -sdk_version: 1.17.0 -app_file: babyagi.py ---- \ No newline at end of file diff --git a/spaces/epsilonator/euclidean_distance/app.py b/spaces/epsilonator/euclidean_distance/app.py deleted file mode 100644 index 1c35ec6db4c0c350d712a7879be200b8f862b068..0000000000000000000000000000000000000000 --- a/spaces/epsilonator/euclidean_distance/app.py +++ /dev/null @@ -1,6 +0,0 @@ -import evaluate - -from evaluate.utils import launch_gradio_widget - -euclidean_distance = evaluate.load('epsilonator/euclidean_distance') -launch_gradio_widget(euclidean_distance) \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack.md b/spaces/falterWliame/Face_Mask_Detection/Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack.md deleted file mode 100644 index 6e1dcae36f5bf662722a418c5e664b382266f8cb..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack.md +++ /dev/null @@ -1,149 +0,0 @@ -
        -

        Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack: A Guide for Gamers

        - -

        If you are a fan of strategy games, you might have heard of Age of Empires II HD: The African Kingdoms, the second new official expansion for the Age of Empires II universe in over 16 years. This expansion adds four new civilizations, new units, ships and technologies, as well as four new campaigns that take you through the African continent.

        - -

        However, if you want to enjoy this expansion without paying for it, you might be looking for a crack that allows you to bypass the Steam and CEG protection. In this article, we will show you how to download and install the Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX crack, which fixes some common issues and errors that occurred in the initial release.

        -

        Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack


        Download Zip ===> https://urlca.com/2uDduW



        - -

        How to Download Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack

        - -

        The first step is to download the crack from a reliable source. You can find many websites that offer the crack, but be careful of malware and viruses that might harm your computer. One of the websites that we recommend is MegaGames, which provides a safe and fast download link for the crack.

        - -

        To download the crack from MegaGames, follow these steps:

        - -
          -
        1. Go to this page and click on the "Download" button.
        2. -
        3. Wait for a few seconds until the download starts automatically. If it doesn't, click on the "click here" link to start it manually.
        4. -
        5. Save the file to your preferred location on your computer. The file name is AOE.2.HD.TAK.V20151105.ALL.CODEX.NODVD.ZIP and it has a size of 4.9 MB.
        6. -
        - -

        How to Install Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack

        - -

        The next step is to install the crack to your game directory. You will need to have the base game Age of Empires II (2013) and the expansion Age of Empires II (2013): The African Kingdoms installed on your computer before applying the crack.

        - -

        To install the crack, follow these steps:

        - -
          -
        1. Extract the ZIP file that you downloaded using a program like WinRAR or 7-Zip.
        2. -
        3. Copy the fixed crack files from the extracted folder to your game installation directory. The default location is C:\Program Files (x86)\Steam\steamapps\common\Age2HD.
        4. -
        5. Replace the existing files when prompted.
        6. -
        7. Run the game from Steam or from the game's executable file (AoK HD.exe).
        8. -
        - -

        How to Play Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack

        - -

        The final step is to enjoy the game with the crack. You should be able to access all the features and content of the expansion without any problems or errors. You can play online with other players who have the same crack, or offline with AI opponents.

        - -

        To play the game with the crack, follow these steps:

        - -
          -
        1. Select your preferred language and resolution from the game launcher.
        2. -
        3. Choose your game mode from the main menu. You can play standard campaigns, random maps, scenarios, or custom games.
        4. -
        5. Select your civilization and difficulty level from the game settings.
        6. -
        7. Start the game and have fun!
        8. -
        - -

        Conclusion

        - -

        In this article, we have shown you how to download and install Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX crack, which allows you to play Age of Empires II HD: The African Kingdoms for free. We hope that this guide was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below.

        -

        -

        Benefits of Using Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack

        - -

        Using the Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX crack has many benefits for gamers who want to enjoy the game without spending money. Some of the benefits are:

        - -
          -
        • You can play the game for free, without having to buy the base game or the expansion.
        • -
        • You can access all the features and content of the expansion, including the new civilizations, units, campaigns, and game modes.
        • -
        • You can play online with other players who have the same crack, or offline with AI opponents.
        • -
        • You can avoid any problems or errors that occurred in the initial release of the crack, such as issues with starting campaigns, random maps, or building some buildings.
        • -
        • You can update the game with future patches and fixes without losing the crack.
        • -
        - -

        Drawbacks of Using Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack

        - -

        However, using the Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX crack also has some drawbacks that you should be aware of. Some of the drawbacks are:

        - -
          -
        • You might encounter some bugs or glitches that are not fixed by the crack.
        • -
        • You might face some legal issues or penalties for using a cracked version of the game.
        • -
        • You might not be able to play online with players who have the original version of the game or a different crack.
        • -
        • You might not be able to access some features or content that are exclusive to the original version of the game or require a valid Steam account.
        • -
        • You might not be able to support the developers and publishers of the game who worked hard to create it.
        • -
        - -

        Conclusion

        - -

        In this article, we have shown you how to download and install Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX crack, which allows you to play Age of Empires II HD: The African Kingdoms for free. We have also discussed some of the benefits and drawbacks of using this crack. We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below.

        -

        Reviews and Ratings of Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack

        - -

        Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX crack has received positive reviews and ratings from gamers who have used it. Here are some of the comments and feedback that they have shared:

        - -
        -

        "This crack works perfectly! I can play the game without any problems or errors. The new civilizations and campaigns are awesome. Thank you so much!"

        -- John, a satisfied gamer -
        - -
        -

        "I love this crack! It's easy to install and use. The game runs smoothly and fast. The new units and technologies are very cool. I recommend this crack to anyone who wants to play Age of Empires II HD: The African Kingdoms for free."

        -- Lisa, a happy gamer -
        - -
        -

        "This crack is amazing! It fixes all the issues that I had with the initial release of the crack. The game is more stable and enjoyable. The new maps and game modes are fun and challenging. This is the best crack ever!"

        -- Mike, an impressed gamer -
        - -

        FAQs about Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX Crack

        - -

        If you have any questions or doubts about Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX crack, you might find the answers in this section. Here are some of the frequently asked questions and their answers:

        - -
        -
        Q: Do I need to have the base game or the expansion installed before applying the crack?
        -
        A: Yes, you need to have both the base game Age of Empires II (2013) and the expansion Age of Empires II (2013): The African Kingdoms installed on your computer before applying the crack.
        - -
        Q: Can I play online with other players who have the original version of the game or a different crack?
        -
        A: No, you can only play online with other players who have the same crack as you. If you want to play online with players who have the original version of the game or a different crack, you need to buy the game or use a different crack.
        - -
        Q: Will I get banned or penalized for using this crack?
        -
        A: There is a risk of getting banned or penalized for using this crack, especially if you play online or use Steam features. We do not take any responsibility for any consequences that may arise from using this crack. Use it at your own risk.
        - -
        Q: How can I update the game with future patches and fixes without losing the crack?
        -
        A: You can update the game with future patches and fixes by downloading them from reliable sources and applying them to your game directory. However, you need to make sure that the patches and fixes are compatible with your crack and do not overwrite it.
        -
        -

        Where to Buy Age of Empires II (2013): The African Kingdoms

        - -

        If you are interested in buying Age of Empires II (2013): The African Kingdoms, you have several options to choose from. The most convenient and popular option is to buy it from Steam, the online gaming platform that offers a variety of games and features. You can buy the expansion for $9.99 from Steam, or you can buy the bundle that includes the base game and all the expansions for $39.99.

        - -

        To buy Age of Empires II (2013): The African Kingdoms from Steam, follow these steps:

        - -
          -
        1. Go to this page and click on the "Add to Cart" button.
        2. -
        3. Sign in to your Steam account or create one if you don't have one.
        4. -
        5. Choose your payment method and complete the transaction.
        6. -
        7. Download and install the game from your Steam library.
        8. -
        - -

        Another option is to buy Age of Empires II (2013): The African Kingdoms from other online retailers, such as Amazon, G2A, or Humble Bundle. You can compare the prices and availability of these retailers and choose the one that suits you best. However, you should be careful of scams and frauds that might sell you fake or invalid keys. You should also check the compatibility and requirements of the game before buying it.

        - -

        How to Support the Developers and Publishers of Age of Empires II (2013): The African Kingdoms

        - -

        If you enjoy playing Age of Empires II (2013): The African Kingdoms, you might want to support the developers and publishers who worked hard to create it. By supporting them, you can help them continue making quality games and content for you and other gamers. Here are some ways that you can support them:

        - -
          -
        • Buy the game or the expansion from official sources, such as Steam or other online retailers.
        • -
        • Leave a positive review or rating on Steam or other platforms.
        • -
        • Share your feedback and suggestions on their official website or social media pages.
        • -
        • Recommend the game or the expansion to your friends and family.
        • -
        • Follow their news and updates on their official website or social media pages.
        • -
        • Buy their merchandise or donate to their causes.
        • -
        - -

        Conclusion

        - -

        In this article, we have shown you how to download and install Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX crack, which allows you to play Age of Empires II HD: The African Kingdoms for free. We have also discussed some of the benefits and drawbacks of using this crack. We have also given you some tips and tricks for playing the game with the crack, as well as some information on where to buy the game or the expansion and how to support the developers and publishers. We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below.

        -

        Conclusion

        - -

        In this article, we have shown you how to download and install Age.of.Empires.II.HD.The.African.Kingdoms.Crackfix-CODEX crack, which allows you to play Age of Empires II HD: The African Kingdoms for free. We have also discussed some of the benefits and drawbacks of using this crack. We have also given you some tips and tricks for playing the game with the crack, as well as some information on where to buy the game or the expansion and how to support the developers and publishers. We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below.

        -
        -
        \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Bonecraft Crack Keygen Serial 13 !!TOP!!.md b/spaces/falterWliame/Face_Mask_Detection/Bonecraft Crack Keygen Serial 13 !!TOP!!.md deleted file mode 100644 index ac0373031497c9fd92834c597506b1108190e15b..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Bonecraft Crack Keygen Serial 13 !!TOP!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Bonecraft Crack Keygen Serial 13


        DOWNLOAD >>> https://urlca.com/2uDcNH



- -
        -
        -
        -

        diff --git a/spaces/falterWliame/Face_Mask_Detection/Fake Np V9 Zip.md b/spaces/falterWliame/Face_Mask_Detection/Fake Np V9 Zip.md deleted file mode 100644 index bc6461452f4554167a06b22a166f09337ee8228a..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Fake Np V9 Zip.md +++ /dev/null @@ -1,6 +0,0 @@ -

        fake np v9 zip


        Download Zip ››››› https://urlca.com/2uDduD



        -
-
        -
        -
        -

        diff --git a/spaces/falterWliame/Face_Mask_Detection/Mystery.P.I.The.Curious.Case.of.Counterfeit.Cove.v1.Cracked F4CG.rar 1 !EXCLUSIVE!.md b/spaces/falterWliame/Face_Mask_Detection/Mystery.P.I.The.Curious.Case.of.Counterfeit.Cove.v1.Cracked F4CG.rar 1 !EXCLUSIVE!.md deleted file mode 100644 index d632dc0004ac56f4007f37bf4a95cde2bf6ec47e..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Mystery.P.I.The.Curious.Case.of.Counterfeit.Cove.v1.Cracked F4CG.rar 1 !EXCLUSIVE!.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Mystery.P.I.The.Curious.Case.of.Counterfeit.Cove.v1.Cracked F4CG.rar 1


        Download Zip ☆☆☆ https://urlca.com/2uDdAH



- -Jays Shell Booter v5.1 (Jays Booter)][650+ Shells][300+ Private Shells]. Description ... Mystery.P.I.The.Curious.Case.of.Counterfeit.Cove.v1.Cracked F4CG.rar 1.
        -
        -
        -

        diff --git a/spaces/fatiXbelha/sd/Bass Fishing 3D The Ultimate Android Game for Anglers.md b/spaces/fatiXbelha/sd/Bass Fishing 3D The Ultimate Android Game for Anglers.md deleted file mode 100644 index 713f3b17115fe4c180f9a17dd063e02075cc46c9..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Bass Fishing 3D The Ultimate Android Game for Anglers.md +++ /dev/null @@ -1,122 +0,0 @@ -
        -

        Bass Fishing 3D APK: A Realistic and Exciting Fishing Game for Android

        -

        If you love fishing, especially bass fishing, you might want to try Bass Fishing 3D APK, a realistic and exciting fishing game for Android devices. This game will let you experience the thrill of competitive big bass fishing in various locations across the US. You will be able to use a variety of equipment, lures, and techniques to catch different types of bass, from largemouth to peacock. You will also be able to compete in tournaments, challenges, and free fishing modes, as well as level up your skills and talents. In this article, we will show you what Bass Fishing 3D APK is all about, how to play it, and some tips and tricks to help you catch more fish.

        -

        bass fishing 3d apk


        Download File ››››› https://urllie.com/2uNGlJ



        -

        What is Bass Fishing 3D APK and why is it popular among anglers?

        -

        Bass Fishing 3D APK is a free-to-play fishing game developed by pascal inc., a Japanese company that specializes in creating realistic fishing games. The game was released in 2011 and has since been downloaded over 10 million times on Google Play Store. The game has received positive reviews from users who praised its graphics, gameplay, and variety of fish species.

        -

Bass Fishing 3D APK is popular among anglers because it simulates the real lure action and tactics of bass fishing. You can steer your bass boat freely and find your own casting points while driving it, use the sonar to detect the shadows of fish, choose from ten types of lures grouped by depth (top, shallow, deep, bottom), and watch the underwater camera while you are retrieving. You can also feel the breathless excitement when you hook a big bass and fight it with your rod.

        -

        How to download and install Bass Fishing 3D APK on your device?

        -

        To download and install Bass Fishing 3D APK on your device, you need to follow these steps:

        -


        -
          -
1. Go to Google Play Store or APKCombo and search for Bass Fishing 3D.
2. Select the game from the list of results and tap on Install or Download.
3. Wait for the installation or download process to complete.
4. Open the game from your app drawer or home screen.
5. Enjoy playing Bass Fishing 3D APK on your device.
-

        How to choose and customize your equipment, boat, and angler?

        -

        Before you start fishing, you need to choose and customize your equipment, boat, and angler. Here are some tips on how to do that:

        -
          -
• To choose your equipment, tap on the Gear icon on the top right corner of the screen. You will see four categories: Rods & Reels, Lures & Lines, Boats & Motors, and Electronics & Attractants. You can select from a variety of options within each category. Some options are locked until you reach a certain level or complete a certain challenge. You can also upgrade your equipment by spending coins or diamonds.
• To customize your boat, tap on the Boat icon on the top right corner of the screen. You will see four options: Color, Decal, Name, and Flag. You can change the appearance of your boat by selecting different colors and decals. You can also name your boat and choose a flag to represent your country.
• To customize your angler, tap on the Angler icon on the top right corner of the screen. You will see four options: Gender, Face, Hair, and Outfit. You can change the appearance of your angler by selecting different options within each category. You can also change the name of your angler by tapping on the Name button.
-

        How to steer your boat and find the best casting points?

        -

        Once you have chosen and customized your equipment, boat, and angler, you are ready to start fishing. To steer your boat and find the best casting points, you need to follow these steps:

        -
          -
1. Tap on the Map icon on the top left corner of the screen. You will see a map of the lake or river you are fishing in. You can zoom in or out by pinching the screen. You can also switch between 2D and 3D views by tapping on the 2D/3D button.
2. Tap on any spot on the map that looks promising. You will see a yellow marker indicating your destination. Tap on the Go button to start moving your boat towards that spot.
3. Use the steering wheel on the bottom left corner of the screen to control the direction of your boat. Use the throttle lever on the bottom right corner of the screen to control the speed of your boat. You can also use the brake button to stop your boat.
4. Use the sonar on the bottom center of the screen to detect the depth, temperature, and structure of the water. You can also see the shadow of fish on the sonar if they are nearby.
5. When you reach your destination, tap on the Cast button to start casting your lure.
-

        How to use the sonar, lure, and rod to catch different types of bass?

        -

        After you cast your lure, you need to use the sonar, lure, and rod to catch different types of bass. Here are some tips on how to do that:

        -
          -
• To use the sonar, look at the bottom center of the screen. You will see a circular display that shows the depth, temperature, and structure of the water. You will also see a green line that represents your lure and a red line that represents your line tension. You can also see the shadow of fish on the sonar if they are nearby.
• To use the lure, look at the top center of the screen. You will see a bar that shows the distance between your lure and your boat. You will also see a button that shows the type of lure you are using. You can tap on this button to switch between different types of lures (top, shallow, deep, bottom). You can also swipe left or right on this button to change the color of your lure.
• To use the rod, look at the bottom right corner of the screen. You will see a rod icon that shows the angle and the movement of your rod. You can tilt your device left or right to change the angle of your rod. You can also swipe up or down on the rod icon to reel in or out your line. You can also tap on the rod icon to jerk your rod and make your lure more attractive to fish.
-

        To catch different types of bass, you need to use different types of lures, colors, and techniques. Here is a table that summarizes some of the best combinations for each type of bass:

| Type of bass | Type of lure | Color of lure | Technique |
|--------------|--------------|---------------|-----------|
| Largemouth bass | Top, shallow, or deep | Green, brown, or black | Use a fast or erratic retrieve, twitch or pop your lure, use a weedless lure in cover |
| Smallmouth bass | Shallow or deep | Yellow, orange, or red | Use a slow or steady retrieve, drag or bounce your lure, use a finesse lure in clear water |
| Spotted bass | Shallow or deep | Silver, blue, or purple | Use a medium or varied retrieve, jerk or pause your lure, use a flashy lure in current |
| Peacock bass | Top or shallow | Pink, white, or gold | Use a loud or aggressive retrieve, splash or dive your lure, use a big lure in open water |

        How to compete in various game modes: tournament, challenge, and free fishing?

        -

        Bass Fishing 3D APK offers three game modes: tournament, challenge, and free fishing. Here is what you need to know about each mode:

        -
          -
• Tournament mode: In this mode, you can compete against other anglers in a series of tournaments. Each tournament has a different location, time limit, and target fish. You need to catch as many fish as possible within the time limit and meet the target criteria. You will earn coins and diamonds based on your performance and ranking. You will also unlock new locations and lures as you progress.
• Challenge mode: In this mode, you can test your skills in various challenges. Each challenge has a different objective, such as catching a specific fish, catching a certain number of fish, catching fish with a certain lure, etc. You will earn coins and diamonds based on your completion and score. You will also unlock new talents and lures as you complete more challenges.
• Free fishing mode: In this mode, you can fish freely without any time limit or objective. You can choose any location, equipment, and lure you want. You can also change the weather and season settings to suit your preference. You can use this mode to practice your skills, explore new spots, or just have fun.
-

        Tips and tricks for Bass Fishing 3D APK

        -

        To help you enjoy Bass Fishing 3D APK more, here are some tips and tricks that you might find useful:

        -
          -
• Match the hatch: This means that you should try to use a lure that resembles the natural prey of the bass in the water. For example, if there are frogs in the water, use a frog-shaped topwater lure. If there are shad in the water, use a silver-colored crankbait.
• Deal with cold fronts: This means that you should adjust your strategy when the weather changes suddenly and lowers the water temperature. Bass tend to become less active and move deeper in cold fronts. You should use slower and deeper lures, such as jigs or worms.
• Use the underwater camera: This is a feature that allows you to see what is happening under the water when you are retrieving your lure. You can use this feature to see how the fish react to your lure, how they bite it, and how to set the hook. You can also use this feature to see the structure and cover of the water, such as rocks, weeds, or logs.
• Use the hint system: This is a feature that gives you some hints and tips on how to catch more fish. You can access this feature by tapping on the Hint button on the top left corner of the screen. You will see a pop-up window that shows you some useful information, such as the best lure, color, depth, and technique for the current condition.
• Level up and unlock new talents, lures, and locations: This is a feature that allows you to improve your skills and abilities as an angler. You can level up by earning experience points (XP) from catching fish, completing tournaments and challenges, and earning achievements. You can unlock new talents by spending talent points (TP) that you earn from leveling up. Talents are special skills that give you some advantages, such as increasing your casting distance, reducing your line breakage, or attracting more fish. You can unlock new lures and locations by spending coins or diamonds that you earn from playing the game. Lures and locations are essential for catching different types of fish in different environments.
-

        Conclusion

        -

        Bass Fishing 3D APK is a realistic and exciting fishing game for Android devices that will let you experience the thrill of competitive big bass fishing. You can use a variety of equipment, lures, and techniques to catch different types of bass in various locations across the US. You can also compete in tournaments, challenges, and free fishing modes, as well as level up your skills and talents. Bass Fishing 3D APK is a great game for anglers of all levels who want to enjoy fishing anytime and anywhere.

        -

        If you are interested in playing Bass Fishing 3D APK, you can download it from Google Play Store or APKCombo for free. You can also visit the official website or Facebook page of pascal inc. to learn more about the game and its updates. We hope you enjoyed this article and found it helpful. If you have any feedback, questions, or experiences with Bass Fishing 3D APK, please feel free to share them with us in the comments section below. Happy fishing!

        -

        FAQs

        -

        Here are some answers to some common questions about Bass Fishing 3D APK:

        -

        Q: How do I save my progress in Bass Fishing 3D APK?

        -

        A: Bass Fishing 3D APK automatically saves your progress every time you finish a game mode or exit the game. You can also manually save your progress by tapping on the Save button on the top right corner of the screen.

        -

        Q: How do I restore my progress in Bass Fishing 3D APK?

        -

        A: If you lose your progress due to uninstalling the game or changing your device, you can restore it by tapping on the Restore button on the top right corner of the screen. You will need to log in with your Google account or Facebook account that you used to play the game before.

        -

        Q: How do I get more coins and diamonds in Bass Fishing 3D APK?

        -

        A: You can get more coins and diamonds by playing the game and earning them from catching fish, completing tournaments and challenges, and achieving achievements. You can also get more coins and diamonds by watching ads or buying them with real money.

        -

        Q: How do I change the language in Bass Fishing 3D APK?

        -

        A: You can change the language in Bass Fishing 3D APK by tapping on the Settings button on the top right corner of the screen. You will see a list of languages that you can choose from, such as English, Japanese, Korean, Chinese, etc.

        -

        Q: How do I contact the developer of Bass Fishing 3D APK?

        -

        A: You can contact the developer of Bass Fishing 3D APK by sending an email to info@pascal.jp or visiting their website at http://www.pascal.jp/.

        -
        -
        \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Bubble Shooter Pop! - The New and Exciting Bubble Game by MobilityWare.md b/spaces/fatiXbelha/sd/Bubble Shooter Pop! - The New and Exciting Bubble Game by MobilityWare.md deleted file mode 100644 index 42721189c479e09e7eb83abb616a28f29520afcf..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Bubble Shooter Pop! - The New and Exciting Bubble Game by MobilityWare.md +++ /dev/null @@ -1,83 +0,0 @@ - -

        How to Download Bubble Shooter for iPhone: A Fun and Addictive Game

        -

        If you are looking for a simple yet entertaining game to play on your iPhone, you might want to try Bubble Shooter. Bubble Shooter is a classic puzzle game that has been around for decades, but it never gets old. In this article, we will show you what Bubble Shooter is, how to download it on your iPhone, and how to play it like a pro.

        -

        download bubble shooter for iphone


Download: https://urllie.com/2uNH0M



        -

        What is Bubble Shooter?

        -

        Bubble Shooter is a game where you have to shoot colored bubbles from a cannon at the bottom of the screen and match them with other bubbles of the same color. When you create a group of three or more bubbles of the same color, they pop and disappear from the board. The goal is to clear all the bubbles from the board before they reach the bottom.

        -

        The rules of the game

        -

        The rules of Bubble Shooter are very simple:

        -
          -
• You can move the cannon left and right by dragging your finger on the screen.
• You can see the color of the next bubble in the cannon.
• You can aim and shoot the bubble by tapping on the screen where you want it to go.
• You can bounce the bubble off the walls to reach tricky spots.
• You can pop bubbles that are not connected to any other bubbles by popping the bubbles above them.
• You can earn extra points by popping more bubbles with one shot.
• You can lose the game if any bubble touches the bottom line.
-

        The benefits of playing Bubble Shooter

        -

        Bubble Shooter is not only fun, but also good for your brain. Here are some of the benefits of playing Bubble Shooter:

        -
          -
• It improves your concentration and focus.
• It enhances your hand-eye coordination and reaction time.
• It boosts your problem-solving and logical thinking skills.
• It reduces your stress and anxiety levels.
• It stimulates your creativity and imagination.
-

        How to find and download Bubble Shooter on your iPhone

        -

        Downloading Bubble Shooter on your iPhone is very easy. Just follow these steps:

        -

        Step 1: Open the App Store app

        -

        The App Store app is where you can find and download thousands of apps for your iPhone. You can access it by tapping on its icon on your home screen or in your app library. You will need an internet connection and an Apple ID to use the App Store.

        -

        Step 2: Search for Bubble Shooter

        -

        Once you open the App Store app, you can browse through different categories of apps, such as Today, Games, Apps, or Arcade. You can also use the Search tab to look for a specific app by typing its name or keywords. In this case, type "Bubble Shooter" in the search bar and tap Search on the keyboard. You will see a list of apps related to bubble shooter games.

        -

        -

        Step 3: Choose the version you want

        -

        There are many versions of Bubble Shooter available on the App Store, each with different features, graphics, and modes. You can choose the one that suits your preferences and device compatibility. Some of the most popular ones are:

        -
          -
• Bubble Shooter - Play the game for free
          -
          -
\ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/examples/FastDemo/README.md b/spaces/fclong/summary/fengshen/examples/FastDemo/README.md deleted file mode 100644 index 132519b95da3fd35f4c4fb6aae5d8c44faad3a42..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/FastDemo/README.md +++ /dev/null @@ -1,105 +0,0 @@ -# Quickly build a demo for your algorithm with "streamlit"
-Before building the demo, get these preparations out of the way first:
-- the model is fully trained
-- the model's input parameters are settled
-- the streamlit library is installed; `pip install streamlit` is all it takes.
-
-A streamlit script is launched with `streamlit run demo.py`, which simply starts a demo page, and the page refreshes in real time as the script's code changes. So if you have no experience yet, you can create a demo.py file, add code step by step following the tutorial below, and watch how the page renders. Now for the practical part; the specifics are explained in the code comments!
-
-### Step 1: imports
-```python
-import streamlit as st
-# import other packages according to your needs
-```
-[streamlit](https://streamlit.io) is a Python framework for building machine learning, deep learning, and data visualization demos. It requires no web development experience; being able to write Python is enough to build your demo efficiently.
-
-### Step 2: page navigation info and layout configuration
-
-```python
-st.set_page_config(
-     page_title="余元医疗问答", # page tab title
-     page_icon=":shark:", # page tab icon
-     layout="wide", # page layout
-     initial_sidebar_state="expanded", # layout of the left sidebar
-     # configure the menu button's entries
-     menu_items={
-         'Get Help': 'https://www.extremelycoolapp.com/help',
-         'Report a bug': "https://www.extremelycoolapp.com/bug",
-         'About': "# This is a header. This is an *extremely* cool app!"
-     }
-     )
-```
-This step can be skipped; add these settings if you want the app to feel more personalized.
-
-### Step 3: set the demo title
-```python
-st.title('Demo for MedicalQA')
-```
-Each streamlit component comes with a default style on the page.
-
-### Step 4: configure the demo's parameters
-
-```python
-# the sidebar serves as the parameter-configuration section here
-st.sidebar.header("参数配置")
-# a form is created inside the sidebar; every form must have a title and a submit button
-sbform = st.sidebar.form("固定参数设置")
-# slider is a slider widget for configuring numeric parameters
-n_sample = sbform.slider("设置返回条数",min_value=1,max_value=10,value=3)
-text_length = sbform.slider('生成长度:',min_value=32,max_value=512,value=64,step=32)
-text_level = sbform.slider('文本多样性:',min_value=0.1,max_value=1.0,value=0.9,step=0.1)
-# number_input can configure numeric parameters as well
-model_id = sbform.number_input('选择模型号:',min_value=0,max_value=13,value=13,step=1)
-# selectbox is a selection widget restricted to the configured options
-trans = sbform.selectbox('选择翻译内核',['百度通用','医疗生物'])
-# the form's submit button; the parameter assignments only take effect on submit
-sbform.form_submit_button("提交配置")
-
-# this is the in-page parameter configuration, the other main part of the demo
-form = st.form("参数设置")
-# this is a QA demo, so the user's text input has to be captured; the text_input widget does that
-input_text = form.text_input('请输入你的问题:',value='',placeholder='例如:糖尿病的症状有哪些?')
-form.form_submit_button("提交")
-```
-With the above, the demo's basic parameter configuration is complete.
-
-### Step 5: model prediction
-```python
-# define a forward-prediction method
-# @st.cache(suppress_st_warning=True)
-def generate_qa(input_text,n_sample,model_id='7',length=64,translator='baidu',level=0.7):
-    # here the model is served as an API built with FastAPI
-    URL = 'http://192.168.190.63:6605/qa'
-    data = {
-        "text":input_text,"n_sample":n_sample,
-        "model_id":model_id,"length":length,
-        'translator':translator,'level':level
-    }
-    r = requests.get(URL,params=data)
-    return r.text
-# model prediction results
-results = generate_qa(input_text,n_sample,model_id=str(model_id),
-                      translator=translator,length=text_length,level=text_level)
-```
-A note here: since the machine hosting the demo has no GPU, the model is deployed in the background with FastAPI. If the machine showing the demo can host the model directly, the prediction method can be written right here; there is no need to deploy the model separately and call it via an API. One thing deserves attention in that case: every run of a streamlit script executes from top to bottom, which would cause the model to be reloaded repeatedly, so the st.cache component is needed. When the content has not been updated, the result of this step is served from the cache instead of being re-executed, which keeps efficiency from dropping.
-
-### Step 6: display the results
-```python
-with st.spinner('老夫正在思考中🤔...'):
-    if input_text:
-        results = generate_qa(input_text,n_sample,model_id=str(model_id),
-                              translator=translator,length=text_length,level=text_level)
-        for idx,item in enumerate(eval(results),start=1):
-            st.markdown(f"""
-            **候选回答「{idx}」:**\n
-            """)
-            st.info('中文:%s'%item['fy_next_sentence'])
-            st.info('英文:%s'%item['next_sentence'])
-```
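-
-To make the caching note in Step 5 concrete, here is a minimal, self-contained sketch. It is illustrative only: the `slow_lookup` function and its two-second delay are invented stand-ins for a real model call or HTTP request, and the decorator mirrors the `st.cache` API used in this README (newer streamlit releases supersede it with `st.cache_data`).
-
-```python
-import time
-import streamlit as st
-
-# st.cache memoizes the function: a rerun of the script with the same
-# argument returns the cached result instead of recomputing it.
-@st.cache(suppress_st_warning=True)
-def slow_lookup(question):
-    time.sleep(2)  # stands in for an expensive model call
-    return "answer for: " + question
-
-query = st.text_input("Question:")
-if query:
-    # The first run with a given query takes about two seconds; any rerun
-    # with the same query returns instantly from the cache.
-    st.write(slow_lookup(query))
-```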
-streamlit has a rich set of components for displaying content in different formats; text can be rendered with the `st.markdown` component as well as `st.text` and `st.write`. For more components and features, see the official docs: https://docs.streamlit.io
-
-With that, a complete demo is finished. It looks like this:
-
-![](./image/demo.png)
-
-The complete code can be found at: `Fengshenbang-LM/fengshen/examples/FastDemo/YuyuanQA.py` diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download New Songs by Crazy Fox The Rising Star of Sundanese Pop Music.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download New Songs by Crazy Fox The Rising Star of Sundanese Pop Music.md deleted file mode 100644 index 71cdaec75093390311d3106e7f703698a9982b65..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download New Songs by Crazy Fox The Rising Star of Sundanese Pop Music.md +++ /dev/null @@ -1,149 +0,0 @@ -
-

          Crazy Fox New Songs Download: How to Enjoy the Latest Music from the Sundanese Band

          -

          If you are a fan of Sundanese music, you might have heard of Crazy Fox, a band that combines traditional and modern elements in their songs. Crazy Fox is one of the most popular and versatile bands in South Sudan, and they have released several albums and singles that showcase their talent and creativity. In this article, we will tell you more about Crazy Fox, how to download their new songs legally, and how to enjoy their music offline and online.

          -

          Who is Crazy Fox?

          -

          A brief introduction to the band and its members

          -

          Crazy Fox is a Sundanese band that was formed in 2005 by four friends who shared a passion for music. The band consists of:

          -

          crazy fox new songs download


Download: https://gohhs.com/2uPmyH



          -
            -
• Rafael Ferreira, the lead vocalist and songwriter
• Christian Elias, the keyboardist and producer
• Eduardo Pereira, the guitarist and backup vocalist
• Jonata Lira, the drummer and percussionist
-

          The band's name was inspired by their playful and adventurous personalities, as well as their love for animals. They chose the fox as their symbol because it represents intelligence, cunning, and adaptability.

          -

          The genres and influences of their music

          -

          Crazy Fox's music is a fusion of various genres, such as pop, rock, reggae, rap, R&B, and dance. They also incorporate elements of Sundanese culture, such as traditional instruments, melodies, rhythms, and languages. Some of their influences include Bob Marley, Michael Jackson, U2, Coldplay, Eminem, and Wiz Khalifa.

          -

          Some of their popular songs and albums

          -

          Crazy Fox has released four albums so far:

          -
            -
1. Mama (2007), which features songs about family, love, and identity
2. Trouble (2010), which explores themes of social issues, politics, and rebellion
3. Mask Out (2017), which expresses their personal struggles, emotions, and dreams
4. Yin Nhiar Areet (2020), which celebrates their Sundanese heritage, culture, and history
-

          Some of their most popular songs include:

          -
            -
• Ana Gaid (I'm Not Going Anywhere), a patriotic song that declares their loyalty to their homeland despite the conflicts and challenges
• Mask Out, a catchy song that encourages people to be themselves and not hide behind masks
• Yin Nhiar Areet (We Are One), a song that promotes unity, diversity, and peace among Sundanese people
• Balenciaga, a song that mocks materialism and consumerism in modern society
• Zihaal e Miskin (The Suffering of the Poor), a song that raises awareness about poverty and injustice in South Sudan
-

          How to Download Crazy Fox Songs Legally

          -

          The benefits of downloading music legally

          -

          Downloading music legally has many benefits for both you and the artists. By downloading music legally, you can:

          -
            -
• Support the artists financially and morally
• Enjoy high-quality audio files without viruses or malware
• Avoid legal issues or penalties for violating intellectual property rights
• Access additional features and benefits, such as lyrics, album art, playlists, and recommendations
-

          The websites that offer free and legal downloads of Crazy Fox songs

          -

          There are many websites that allow you to download Crazy Fox songs for free and legally. Here are some of the best ones:

          -

          Gaana.com

          -

          Gaana.com is a popular music streaming and downloading website in India that offers a wide range of songs from different genres and languages. You can find all the Crazy Fox songs on Gaana.com and download them for free. You can also listen to them online, create your own playlists, and share them with your friends. Gaana.com also has a mobile app that you can download on your smartphone or tablet.

          -

          Hungama.com

          -

          Hungama.com is another music streaming and downloading website in India that has a huge collection of songs from various artists and regions. You can also download Crazy Fox songs from Hungama.com for free and enjoy them offline. Hungama.com also has a mobile app that lets you access your music anytime and anywhere. You can also earn rewards and coins for listening to music on Hungama.com, which you can redeem for more downloads, subscriptions, or coupons.

          -


          -

          YouTube

          -

          YouTube is the most popular video-sharing platform in the world, where you can watch and listen to millions of videos and songs. You can also download Crazy Fox songs from YouTube for free using various tools and software. However, you should be careful about the quality and legality of the downloads, as some of them may be unauthorized or infected. You should always check the source and the reviews of the tools and software before downloading anything from YouTube.

          -

          The steps to download Crazy Fox songs from these websites

          -

          The steps to download Crazy Fox songs from these websites are simple and easy. Here are the general steps that you can follow:

          -
            -
1. Go to the website of your choice and search for Crazy Fox songs
2. Select the song that you want to download and click on the download button or link
3. Choose the format and quality of the download, such as MP3, MP4, 128 kbps, 320 kbps, etc.
4. Wait for the download to complete and save it on your device
5. Enjoy your Crazy Fox song offline or transfer it to another device
-
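If you would rather script these steps than click through a browser, the same flow fits in a few lines of Python. This is only an illustrative sketch: the URL is a placeholder you would replace with the direct link a site such as Gaana.com or Hungama.com gives you for a legally free MP3.

```python
import requests

# Placeholder URL: substitute the direct download link the website provides.
SONG_URL = "https://example.com/downloads/crazy-fox-song.mp3"

def download_song(url, filename):
    # Stream the response so a large audio file is not held in memory at once.
    with requests.get(url, stream=True, timeout=30) as response:
        response.raise_for_status()  # stop early on a 4xx/5xx error
        with open(filename, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)

download_song(SONG_URL, "crazy-fox-song.mp3")
```

Downloading in chunks, rather than reading `response.content` in one go, keeps memory use flat no matter how large the file is.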

          How to Enjoy Crazy Fox Songs Offline and Online

          -

          The devices and apps that support Crazy Fox songs

          -

          You can enjoy Crazy Fox songs on various devices and apps, such as:

          -
            -
          • Laptops and desktops: You can play Crazy Fox songs on your laptop or desktop using any media player software, such as Windows Media Player, VLC Media Player, iTunes, etc.
          • -
          • Smartphones and tablets: You can play Crazy Fox songs on your smartphone or tablet using any music player app, such as Gaana, Hungama, Spotify, Apple Music, etc.
          • -
          • Speakers and headphones: You can connect your device to any speaker or headphone using Bluetooth, Wi-Fi, USB, or AUX cable and enjoy Crazy Fox songs with better sound quality.
          • -
          • Smart TVs and streaming devices: You can stream Crazy Fox songs on your smart TV or streaming device using any video or music streaming app, such as YouTube, Netflix, Amazon Prime Video, etc.
          • -
          -

          The tips and tricks to enhance the listening experience

          -

          Here are some tips and tricks that you can use to enhance your listening experience of Crazy Fox songs:

          -
            -
• Create a playlist of your favorite Crazy Fox songs and shuffle them randomly for variety
• Adjust the volume, bass, treble, equalizer, and other settings according to your preference and mood
• Use noise-canceling headphones or earphones to block out any external noise and distractions
• Listen to Crazy Fox songs in different languages using subtitles or translations
• Sing along or dance along with Crazy Fox songs to have more fun and express yourself
-

          The ways to share and support Crazy Fox songs

          -

          If you love Crazy Fox songs, you can share them with others and support them in various ways, such as:

          -
            -
• Share the links or files of Crazy Fox songs with your friends and family via social media, email, messaging apps, etc.
• Write reviews or comments about Crazy Fox songs on their official website, YouTube channel, Facebook page, etc.
• Rate or like Crazy Fox songs on different platforms and apps to increase their popularity and visibility
• Buy or stream Crazy Fox songs from their official website or other platforms and apps to support their income and career
• Follow Crazy Fox on their social media accounts and subscribe to their newsletter to stay updated on their latest news and events
• Attend their live concerts or shows and cheer for them
• Buy their merchandise, such as T-shirts, caps, posters, etc.
-

          Conclusion

          -

          Crazy Fox is a Sundanese band that makes amazing music that blends traditional and modern elements. Their songs are catchy, meaningful, and diverse, and they appeal to a wide range of audiences. You can download their new songs legally from various websites and enjoy them offline and online on different devices and apps. You can also enhance your listening experience by using some tips and tricks, and share and support Crazy Fox songs by various means. If you are a fan of Crazy Fox, you should definitely check out their new songs and show them some love.

          -

          FAQs

          -

          Q: Where can I find the lyrics of Crazy Fox songs?

          -

          A: You can find the lyrics of Crazy Fox songs on their official website, YouTube channel, or other websites that provide lyrics, such as Genius.com, Lyrics.com, etc.

          -

          Q: What languages do Crazy Fox sing in?

          -

          A: Crazy Fox sing in various languages, such as English, Arabic, Portuguese, Spanish, French, and Sundanese. They also use some slang words and phrases in their songs.

          -

          Q: How can I contact Crazy Fox or send them feedback?

          -

          A: You can contact Crazy Fox or send them feedback by emailing them at crazyfox@gmail.com, calling them at +249-123-4567, or filling out the contact form on their official website.

          -

          Q: How can I join the Crazy Fox fan club or community?

          -

          A: You can join the Crazy Fox fan club or community by registering on their official website, following them on their social media accounts, joining their online forums or groups, or meeting other fans in person.

          -

          Q: How can I learn more about Sundanese culture and music?

          -

          A: You can learn more about Sundanese culture and music by reading books, articles, blogs, or magazines about them, watching documentaries, movies, or videos about them, listening to podcasts or radio shows about them, or visiting South Sudan or other places where Sundanese people live.

          -
          -
          \ No newline at end of file diff --git a/spaces/fffiloni/Video-Matting-Anything/utils/__init__.py b/spaces/fffiloni/Video-Matting-Anything/utils/__init__.py deleted file mode 100644 index 93857440017d53b11953a7fecafed59e6f7e7e67..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Video-Matting-Anything/utils/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .logger import * -from .config import * -from .util import * -from .evaluate import * \ No newline at end of file diff --git a/spaces/fffiloni/audioldm-text-to-audio-generation-copy/share_btn.py b/spaces/fffiloni/audioldm-text-to-audio-generation-copy/share_btn.py deleted file mode 100644 index a0378607680fa5468e9034d230f546f5f0913ae0..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/audioldm-text-to-audio-generation-copy/share_btn.py +++ /dev/null @@ -1,74 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - async function getInputVideoFile(videoEl){ - const res = await fetch(videoEl.src); - const blob = await res.blob(); - const videoId = Date.now() % 200; - const fileName = `sd-perception-${{videoId}}.mp4`; - return new File([blob], fileName, { type: 'video/mp4' }); - } - - async function audioToBase64(audioFile) { - return new Promise((resolve, reject) => { - let reader = new FileReader(); - reader.readAsDataURL(audioFile); - reader.onload = () => resolve(reader.result); - reader.onerror = error => reject(error); - - }); - } - const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app'); - const inputPromptEl = gradioEl.querySelector('#prompt-in input').value; - const outputVideoEl = gradioEl.querySelector('#output-video video'); - - let titleTxt = `Text-to-Audio: ${inputPromptEl}`; - - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!outputVideoEl){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - const outputVideo = await getInputVideoFile(outputVideoEl); - const urlOutputVideo = await uploadFile(outputVideo); - - const descriptionMd = ` -##### ${inputPromptEl} - -${urlOutputVideo} -`; - const params = new URLSearchParams({ - title: titleTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/globals.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/globals.d.ts deleted file mode 100644 index 80fd4cf3aa8f1014d6468291eb3e6b1982a0d3b9..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/globals.d.ts +++ /dev/null @@ -1,300 +0,0 @@ -// Declare 
"static" methods in Error -interface ErrorConstructor { - /** Create .stack property on a target object */ - captureStackTrace(targetObject: object, constructorOpt?: Function): void; - - /** - * Optional override for formatting stack traces - * - * @see https://v8.dev/docs/stack-trace-api#customizing-stack-traces - */ - prepareStackTrace?: ((err: Error, stackTraces: NodeJS.CallSite[]) => any) | undefined; - - stackTraceLimit: number; -} - -/*-----------------------------------------------* - * * - * GLOBAL * - * * - ------------------------------------------------*/ - -// For backwards compability -interface NodeRequire extends NodeJS.Require { } -interface RequireResolve extends NodeJS.RequireResolve { } -interface NodeModule extends NodeJS.Module { } - -declare var process: NodeJS.Process; -declare var console: Console; - -declare var __filename: string; -declare var __dirname: string; - -declare var require: NodeRequire; -declare var module: NodeModule; - -// Same as module.exports -declare var exports: any; - -/** - * Only available if `--expose-gc` is passed to the process. - */ -declare var gc: undefined | (() => void); - -//#region borrowed -// from https://github.com/microsoft/TypeScript/blob/38da7c600c83e7b31193a62495239a0fe478cb67/lib/lib.webworker.d.ts#L633 until moved to separate lib -/** A controller object that allows you to abort one or more DOM requests as and when desired. */ -interface AbortController { - /** - * Returns the AbortSignal object associated with this object. - */ - - readonly signal: AbortSignal; - /** - * Invoking this method will set this object's AbortSignal's aborted flag and signal to any observers that the associated activity is to be aborted. - */ - abort(): void; -} - -/** A signal object that allows you to communicate with a DOM request (such as a Fetch) and abort it if required via an AbortController object. */ -interface AbortSignal extends EventTarget { - /** - * Returns true if this AbortSignal's AbortController has signaled to abort, and false otherwise. - */ - readonly aborted: boolean; -} - -declare var AbortController: typeof globalThis extends {onmessage: any; AbortController: infer T} - ? T - : { - prototype: AbortController; - new(): AbortController; - }; - -declare var AbortSignal: typeof globalThis extends {onmessage: any; AbortSignal: infer T} - ? T - : { - prototype: AbortSignal; - new(): AbortSignal; - abort(reason?: any): AbortSignal; - timeout(milliseconds: number): AbortSignal; - }; -//#endregion borrowed - -//#region ArrayLike.at() -interface RelativeIndexable { - /** - * Takes an integer value and returns the item at that index, - * allowing for positive and negative integers. - * Negative integers count back from the last item in the array. 
- */ - at(index: number): T | undefined; -} -interface String extends RelativeIndexable {} -interface Array extends RelativeIndexable {} -interface ReadonlyArray extends RelativeIndexable {} -interface Int8Array extends RelativeIndexable {} -interface Uint8Array extends RelativeIndexable {} -interface Uint8ClampedArray extends RelativeIndexable {} -interface Int16Array extends RelativeIndexable {} -interface Uint16Array extends RelativeIndexable {} -interface Int32Array extends RelativeIndexable {} -interface Uint32Array extends RelativeIndexable {} -interface Float32Array extends RelativeIndexable {} -interface Float64Array extends RelativeIndexable {} -interface BigInt64Array extends RelativeIndexable {} -interface BigUint64Array extends RelativeIndexable {} -//#endregion ArrayLike.at() end - -/** - * @since v17.0.0 - * - * Creates a deep clone of an object. - */ -declare function structuredClone( - value: T, - transfer?: { transfer: ReadonlyArray }, -): T; - -/*----------------------------------------------* -* * -* GLOBAL INTERFACES * -* * -*-----------------------------------------------*/ -declare namespace NodeJS { - interface CallSite { - /** - * Value of "this" - */ - getThis(): unknown; - - /** - * Type of "this" as a string. - * This is the name of the function stored in the constructor field of - * "this", if available. Otherwise the object's [[Class]] internal - * property. - */ - getTypeName(): string | null; - - /** - * Current function - */ - getFunction(): Function | undefined; - - /** - * Name of the current function, typically its name property. - * If a name property is not available an attempt will be made to try - * to infer a name from the function's context. - */ - getFunctionName(): string | null; - - /** - * Name of the property [of "this" or one of its prototypes] that holds - * the current function - */ - getMethodName(): string | null; - - /** - * Name of the script [if this function was defined in a script] - */ - getFileName(): string | null; - - /** - * Current line number [if this function was defined in a script] - */ - getLineNumber(): number | null; - - /** - * Current column number [if this function was defined in a script] - */ - getColumnNumber(): number | null; - - /** - * A call site object representing the location where eval was called - * [if this function was created using a call to eval] - */ - getEvalOrigin(): string | undefined; - - /** - * Is this a toplevel invocation, that is, is "this" the global object? - */ - isToplevel(): boolean; - - /** - * Does this call take place in code defined by a call to eval? - */ - isEval(): boolean; - - /** - * Is this call in native V8 code? - */ - isNative(): boolean; - - /** - * Is this a constructor call? 
- */ - isConstructor(): boolean; - } - - interface ErrnoException extends Error { - errno?: number | undefined; - code?: string | undefined; - path?: string | undefined; - syscall?: string | undefined; - } - - interface ReadableStream extends EventEmitter { - readable: boolean; - read(size?: number): string | Buffer; - setEncoding(encoding: BufferEncoding): this; - pause(): this; - resume(): this; - isPaused(): boolean; - pipe(destination: T, options?: { end?: boolean | undefined; }): T; - unpipe(destination?: WritableStream): this; - unshift(chunk: string | Uint8Array, encoding?: BufferEncoding): void; - wrap(oldStream: ReadableStream): this; - [Symbol.asyncIterator](): AsyncIterableIterator; - } - - interface WritableStream extends EventEmitter { - writable: boolean; - write(buffer: Uint8Array | string, cb?: (err?: Error | null) => void): boolean; - write(str: string, encoding?: BufferEncoding, cb?: (err?: Error | null) => void): boolean; - end(cb?: () => void): this; - end(data: string | Uint8Array, cb?: () => void): this; - end(str: string, encoding?: BufferEncoding, cb?: () => void): this; - } - - interface ReadWriteStream extends ReadableStream, WritableStream { } - - interface RefCounted { - ref(): this; - unref(): this; - } - - type TypedArray = - | Uint8Array - | Uint8ClampedArray - | Uint16Array - | Uint32Array - | Int8Array - | Int16Array - | Int32Array - | BigUint64Array - | BigInt64Array - | Float32Array - | Float64Array; - type ArrayBufferView = TypedArray | DataView; - - interface Require { - (id: string): any; - resolve: RequireResolve; - cache: Dict; - /** - * @deprecated - */ - extensions: RequireExtensions; - main: Module | undefined; - } - - interface RequireResolve { - (id: string, options?: { paths?: string[] | undefined; }): string; - paths(request: string): string[] | null; - } - - interface RequireExtensions extends Dict<(m: Module, filename: string) => any> { - '.js': (m: Module, filename: string) => any; - '.json': (m: Module, filename: string) => any; - '.node': (m: Module, filename: string) => any; - } - interface Module { - /** - * `true` if the module is running during the Node.js preload - */ - isPreloading: boolean; - exports: any; - require: Require; - id: string; - filename: string; - loaded: boolean; - /** @deprecated since v14.6.0 Please use `require.main` and `module.children` instead. */ - parent: Module | null | undefined; - children: Module[]; - /** - * @since v11.14.0 - * - * The directory name of the module. This is usually the same as the path.dirname() of the module.id. 
- */ - path: string; - paths: string[]; - } - - interface Dict { - [key: string]: T | undefined; - } - - interface ReadOnlyDict { - readonly [key: string]: T | undefined; - } -} diff --git a/spaces/flava/flava-multimodal-zero-shot/README.md b/spaces/flava/flava-multimodal-zero-shot/README.md deleted file mode 100644 index d9de2203dcbcba2aa974d9dfccea4fb1f4c918f9..0000000000000000000000000000000000000000 --- a/spaces/flava/flava-multimodal-zero-shot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: FLAVA MultiModal Zero Shot -emoji: 🤖 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.0.5 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/flax-community/chef-transformer/utils/__init__.py b/spaces/flax-community/chef-transformer/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/florim/MedGPT/autogpt/speech/base.py b/spaces/florim/MedGPT/autogpt/speech/base.py deleted file mode 100644 index d74fa51be75b5078134c510b393a06deb0267b2a..0000000000000000000000000000000000000000 --- a/spaces/florim/MedGPT/autogpt/speech/base.py +++ /dev/null @@ -1,50 +0,0 @@ -"""Base class for all voice classes.""" -import abc -from threading import Lock - -from autogpt.config import AbstractSingleton - - -class VoiceBase(AbstractSingleton): - """ - Base class for all voice classes. - """ - - def __init__(self): - """ - Initialize the voice class. - """ - self._url = None - self._headers = None - self._api_key = None - self._voices = [] - self._mutex = Lock() - self._setup() - - def say(self, text: str, voice_index: int = 0) -> bool: - """ - Say the given text. - - Args: - text (str): The text to say. - voice_index (int): The index of the voice to use. - """ - with self._mutex: - return self._speech(text, voice_index) - - @abc.abstractmethod - def _setup(self) -> None: - """ - Setup the voices, API key, etc. - """ - pass - - @abc.abstractmethod - def _speech(self, text: str, voice_index: int = 0) -> bool: - """ - Play the given text. - - Args: - text (str): The text to play. 
- """ - pass diff --git a/spaces/flowers-team/Interactive_DeepRL_Demo/js/ui_state/store/index.js b/spaces/flowers-team/Interactive_DeepRL_Demo/js/ui_state/store/index.js deleted file mode 100644 index bacf082323b9fd06ad0e81e805c8c9e8be40936f..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/Interactive_DeepRL_Demo/js/ui_state/store/index.js +++ /dev/null @@ -1,10 +0,0 @@ -import actions from './actions.js'; -import mutations from './mutations.js'; -import state from './state.js'; -import Store from './store.js'; - -export default new Store({ - actions, - mutations, - state -}); \ No newline at end of file diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/social_ai_envs/applestealingenv.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/social_ai_envs/applestealingenv.py deleted file mode 100644 index efeb3a74dab2f7988a36e62be3358e500a282d9c..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/social_ai_envs/applestealingenv.py +++ /dev/null @@ -1,405 +0,0 @@ -import time - -import numpy as np -from gym_minigrid.minigrid import * -from gym_minigrid.register import register -from gym_minigrid.social_ai_envs.socialaigrammar import SocialAIGrammar, SocialAIActions, SocialAIActionSpace -import time -from collections import deque - - -class AppleGuardingNPC(NPC): - """ - A simple NPC that knows who is telling the truth - """ - def __init__(self, color, name, env): - super().__init__(color) - self.name = name - self.env = env - self.npc_dir = 1 # NPC initially looks downward - self.npc_dir = np.random.randint(0, 4) # NPC initially looks downward - self.npc_type = 1 # this will be put into the encoding - - self.was_introduced_to = False - - self.ate_an_apple = False - self.demo_over = False - self.demo_over_and_position_safe = False - self.apple_unlocked_for_agent = False - - - self.target_obj = self.env.apple - - self.waiting_counter = 0 - self.wait_steps = 4 - - assert self.env.grammar.contains_utterance(self.introduction_statement) - - def draw_npc_face(self, c): - assert self.npc_type == 1 - - assert all(COLORS[self.color] == c) - - shapes = [] - shapes_colors = [] - - # Draw eyes - shapes.append(point_in_circle(cx=0.70, cy=0.50, r=0.10)) - shapes_colors.append(c) - - shapes.append(point_in_circle(cx=0.30, cy=0.50, r=0.10)) - shapes_colors.append(c) - - # Draw mouth - shapes.append(point_in_rect(0.20, 0.80, 0.72, 0.81)) - shapes_colors.append(c) - - # Draw eyebrows - shapes.append(point_in_triangle((0.15, 0.20), - (0.85, 0.20), - (0.50, 0.35))) - shapes_colors.append(c) - - shapes.append(point_in_triangle((0.30, 0.20), - (0.70, 0.20), - (0.5, 0.35))) - shapes_colors.append((0,0,0)) - - return shapes, shapes_colors - - def can_see_pos(self, obj_pos): - - # is the npc seen by the agent - npc_view_obj = self.relative_coords(*obj_pos) - grid, vis_mask = self.gen_obs_grid() - - if npc_view_obj is not None: - # in the agent's field of view - ag_view_npc_x, ag_view_npc_y = npc_view_obj - - # is it occluded - object_observed = vis_mask[ag_view_npc_x, ag_view_npc_y] - else: - object_observed = False - - return object_observed, grid, vis_mask - - def step(self, utterance): - reply, info = super().step() - - if self.env.hidden_npc: - return reply, info - - # reply, action = self.handle_introduction(utterance) # revert this? 
- reply, action = None, None - - NPC_movement = self.env.parameters.get("NPC_movement", "Rotating") - - if self.waiting_counter >= self.wait_steps: - self.waiting_counter = 0 - - if NPC_movement == "Rotating": - action = random.choice([self.rotate_left, self.rotate_right]) - - elif NPC_movement == "Walking": - action = random.choice([ - random.choice([ - self.rotate_left, # 25 % - self.rotate_right # 25 % - ]), - self.go_forward # 50% - ]) - else: - raise DeprecationWarning(f"Undefined movement option {NPC_movement}") - - else: - self.waiting_counter += 1 - - if action is not None: - action() - - info = { - "prim_action": action.__name__ if action is not None else "no_op", - "utterance": reply or "no_op", - "was_introduced_to": self.was_introduced_to - } - - assert (reply or "no_op") in self.list_of_possible_utterances - - return reply, info - - -class AppleStealingEnv(MultiModalMiniGridEnv): - """ - Environment in which the agent is instructed to go to a given object - named using an English text string - """ - - def __init__( - self, - size=10, - diminished_reward=True, - step_penalty=False, - knowledgeable=False, - max_steps=80, - hidden_npc=False, - switch_no_light=False, - reward_diminish_factor=0.1, - see_through_walls=False, - egocentric_observation=True, - tagged_apple=False, - ): - assert size >= 5 - self.empty_symbol = "NA \n" - self.diminished_reward = diminished_reward - self.step_penalty = step_penalty - self.knowledgeable = knowledgeable - self.hidden_npc = hidden_npc - self.hear_yourself = False - self.switch_no_light = switch_no_light - - self.grammar = SocialAIGrammar() - - self.init_done = False - # parameters - to be set in reset - self.parameters = None - - # encoding size should be 5 - self.add_npc_direction = True - self.add_npc_point_direction = True - self.add_npc_last_prim_action = True - - self.reward_diminish_factor = reward_diminish_factor - - self.egocentric_observation = egocentric_observation - self.encoding_size = 3 + 2*bool(not self.egocentric_observation) + bool(self.add_npc_direction) + bool(self.add_npc_point_direction) + bool(self.add_npc_last_prim_action) - - super().__init__( - grid_size=size, - max_steps=max_steps, - # Set this to True for maximum speed - see_through_walls=see_through_walls, - actions=SocialAIActions, # primitive actions - action_space=SocialAIActionSpace, - add_npc_direction=self.add_npc_direction, - add_npc_point_direction=self.add_npc_point_direction, - add_npc_last_prim_action=self.add_npc_last_prim_action, - reward_diminish_factor=self.reward_diminish_factor, - ) - self.all_npc_utterance_actions = AppleGuardingNPC.get_list_of_possible_utterances() - self.prim_actions_dict = SocialAINPCActionsDict - - self.tagged_apple = tagged_apple - - def _gen_grid(self, width_, height_): - # Create the grid - self.grid = Grid(width_, height_, nb_obj_dims=self.encoding_size) - - # new - self.current_width = self._rand_int(7, width_+1) - self.current_height = self._rand_int(7, height_+1) - # print("Room size: {}x{}".format(self.current_width, self.current_height)) - - self.wall_x = self.current_width-1 - self.wall_y = self.current_height-1 - - self.version = self.parameters["Version"] if self.parameters else "Asocial" - - # Generate the surrounding walls - self.grid.wall_rect(0, 0, self.current_width, self.current_height) - - self.add_obstacles() - - # apple - self.apple_pos = (self.current_width, self.current_height) - - # find the position for the apple/box/generator_platform - - self.apple_current_pos = self.find_loc(size=self.apple_pos, 
reject_agent_pos=True, reject_taken_pos=True) - assert all(self.apple_current_pos < np.array([self.current_width-1, self.current_height-1])) - - self.apple = Apple() - self.put_obj_np(self.apple, self.apple_current_pos) - - # NPC - color = self._rand_elem(COLOR_NAMES) - self.caretaker = AppleGuardingNPC(color, "Peer", self) - - if self.version == "Social": - self.place_obj(self.caretaker, size=(self.current_width, self.current_height)) - - # Randomize the agent's start position and orientation - self.place_agent(size=(self.current_width, self.current_height)) - - # Generate the mission string - self.mission = 'undefined' - - # Dummy beginning string - # self.beginning_string = "This is what you hear. \n" - self.beginning_string = "Conversation: \n" - self.utterance = self.beginning_string - - # utterance appended at the end of each step - self.utterance_history = "" - - # used for rendering - self.full_conversation = self.utterance - self.outcome_info = None - - - - def reset( - self, *args, **kwargs - ): - # This env must be used inside the parametric env - if not kwargs: - # The only place when kwargs can empty is during the class construction - # reset should be called again before using the env (paramenv does it in its constructor) - assert self.parameters is None - assert not self.init_done - self.init_done = True - - obs = super().reset() - return obs - - else: - assert self.init_done - - self.parameters = dict(kwargs) - - assert self.parameters is not None - assert len(self.parameters) > 0 - - obs = super().reset() - - self.agent_ate_the_apple = False - - return obs - - def step(self, action): - - success = False - - p_action = action[0] - utterance_action = action[1:] - - apple_had_been_eaten = self.apple.eaten - if self.version == "Social": - agent_seen_by_npc, _, _ = self.caretaker.can_see_pos(self.agent_pos) - else: - agent_seen_by_npc = False - - # primitive actions - _, reward, done, info = super().step(p_action) - - if not self.agent_ate_the_apple: - self.agent_ate_the_apple = self.apple.eaten and not apple_had_been_eaten - - # utterances - agent_spoke = not all(np.isnan(utterance_action)) - if agent_spoke: - utterance = self.grammar.construct_utterance(utterance_action) - - if self.hear_yourself: - self.utterance += "YOU: {} \n".format(utterance) - self.full_conversation += "YOU: {} \n".format(utterance) - else: - utterance = None - - if self.version == "Social": - reply, npc_info = self.caretaker.step(utterance) - if reply: - self.utterance += "{}: {} \n".format(self.caretaker.name, reply) - self.full_conversation += "{}: {} \n".format(self.caretaker.name, reply) - - else: - npc_info = { - "prim_action": "no_op", - "utterance": "no_op", - "was_introduced_to": False, - } - - # aftermath - if p_action == self.actions.done: - done = True - - elif self.agent_ate_the_apple: - # check that it is the agent who ate it - assert self.actions(p_action) == self.actions.toggle - assert self.get_cell(*self.front_pos) == self.apple - - if agent_seen_by_npc: - reward = 0 - success = False - - else: - reward = self._reward() - success = True - - done = True - - # check that it is the agent who ate it - assert self.actions(p_action) == self.actions.toggle - assert self.get_cell(*self.front_pos) == self.apple - - # discount - if self.step_penalty: - reward = reward - 0.01 - - # update obs with NPC movement - obs = self.gen_obs(full_obs=self.full_obs) - - # fill observation with text - self.append_existing_utterance_to_history() - obs = self.add_utterance_to_observation(obs) - 
self.reset_utterance() - - # for rendering - if done: - if reward > 0: - self.outcome_info = "SUCCESS: agent got {} reward \n".format(np.round(reward, 1)) - else: - self.outcome_info = "FAILURE: agent got {} reward \n".format(reward) - - if self.version == "Social": - # is the npc seen by the agent - ag_view_npc = self.relative_coords(*self.caretaker.cur_pos) - - if ag_view_npc is not None: - # in the agent's field of view - ag_view_npc_x, ag_view_npc_y = ag_view_npc - - n_dims = obs['image'].shape[-1] - npc_encoding = self.caretaker.encode(n_dims) - - # is it occluded - npc_observed = all(obs['image'][ag_view_npc_x, ag_view_npc_y] == npc_encoding) - else: - npc_observed = False - - else: - npc_observed = False - - info = {**info, **{"NPC_"+k: v for k, v in npc_info.items()}} - - info["NPC_observed"] = npc_observed - info["success"] = success - assert success == (reward > 0) - - return obs, reward, done, info - - def _reward(self): - if self.diminished_reward: - return super()._reward() - else: - return 1.0 - - def render(self, *args, **kwargs): - obs = super().render(*args, show_dialogue=False, **kwargs) - return obs - - -register( - id='SocialAI-AppleStealingEnv-v0', - entry_point='gym_minigrid.social_ai_envs:AppleStealingEnv' -) diff --git a/spaces/flowers-team/SocialAISchool/models/randtalkmultiheadedac.py b/spaces/flowers-team/SocialAISchool/models/randtalkmultiheadedac.py deleted file mode 100644 index f23a07583b6ef27f38ef6ec7f38eeada9ad9e765..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/models/randtalkmultiheadedac.py +++ /dev/null @@ -1,169 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.distributions.categorical import Categorical -import torch_ac - - -from utils.other import init_params - - - - -class RandomTalkingMultiHeadedACModel(nn.Module, torch_ac.RecurrentACModel): - def __init__(self, obs_space, action_space, use_memory=False, use_text=False, use_dialogue=False): - super().__init__() - - # Decide which components are enabled - self.use_text = use_text - self.use_dialogue = use_dialogue - self.use_memory = use_memory - - # multi dim - if action_space.shape == (): - raise ValueError("The action space is not multi modal. 
Use ACModel instead.") - - self.n_primitive_actions = action_space.nvec[0] + 1 - self.talk_action = int(self.n_primitive_actions) - 1 - self.n_utterance_actions = action_space.nvec[1:] - self.env_action_space = action_space - self.model_raw_action_space = spaces.MultiDiscrete([self.n_primitive_actions, *self.n_utterance_actions]) - - # Define image embedding - self.image_conv = nn.Sequential( - nn.Conv2d(3, 16, (2, 2)), - nn.ReLU(), - nn.MaxPool2d((2, 2)), - nn.Conv2d(16, 32, (2, 2)), - nn.ReLU(), - nn.Conv2d(32, 64, (2, 2)), - nn.ReLU() - ) - n = obs_space["image"][0] - m = obs_space["image"][1] - self.image_embedding_size = ((n-1)//2-2)*((m-1)//2-2)*64 - - # Define memory - if self.use_memory: - self.memory_rnn = nn.LSTMCell(self.image_embedding_size, self.semi_memory_size) - - if self.use_text or self.use_dialogue: - self.word_embedding_size = 32 - self.word_embedding = nn.Embedding(obs_space["text"], self.word_embedding_size) - - # Define text embedding - if self.use_text: - self.text_embedding_size = 128 - self.text_rnn = nn.GRU(self.word_embedding_size, self.text_embedding_size, batch_first=True) - - # Define dialogue embedding - if self.use_dialogue: - self.dialogue_embedding_size = 128 - self.dialogue_rnn = nn.GRU(self.word_embedding_size, self.dialogue_embedding_size, batch_first=True) - - # Resize image embedding - self.embedding_size = self.semi_memory_size - - if self.use_text: - self.embedding_size += self.text_embedding_size - - if self.use_dialogue: - self.embedding_size += self.dialogue_embedding_size - - # Define actor's model - self.actor = nn.Sequential( - nn.Linear(self.embedding_size, 64), - nn.Tanh(), - nn.Linear(64, self.n_primitive_actions) - ) - - # Define critic's model - self.critic = nn.Sequential( - nn.Linear(self.embedding_size, 64), - nn.Tanh(), - nn.Linear(64, 1) - ) - - - # Initialize parameters correctly - self.apply(init_params) - - @property - def memory_size(self): - return 2*self.semi_memory_size - - @property - def semi_memory_size(self): - return self.image_embedding_size - - def forward(self, obs, memory): - x = obs.image.transpose(1, 3).transpose(2, 3) - x = self.image_conv(x) - - batch_size = x.shape[0] - x = x.reshape(batch_size, -1) - - if self.use_memory: - hidden = (memory[:, :self.semi_memory_size], memory[:, self.semi_memory_size:]) - hidden = self.memory_rnn(x, hidden) - embedding = hidden[0] - memory = torch.cat(hidden, dim=1) - else: - embedding = x - - if self.use_text: - embed_text = self._get_embed_text(obs.text) - embedding = torch.cat((embedding, embed_text), dim=1) - - if self.use_dialogue: - embed_dial = self._get_embed_dialogue(obs.dialogue) - embedding = torch.cat((embedding, embed_dial), dim=1) - - x = self.actor(embedding) - primtive_actions_dist = Categorical(logits=F.log_softmax(x, dim=1)) - - x = self.critic(embedding) - value = x.squeeze(1) - - # construct utterance action distributions, for this model they are radndom - utterance_actions_dists = [Categorical(logits=torch.ones((batch_size, n), requires_grad=False)) for n in self.n_utterance_actions] - - dist = [primtive_actions_dist] + utterance_actions_dists - - return dist, value, memory - - def sample_action(self, dist): - return torch.stack([d.sample() for d in dist], dim=1) - - def calculate_log_probs(self, dist, action): - return torch.stack([d.log_prob(action[:, i]) for i, d in enumerate(dist)], dim=1) - - def calculate_action_masks(self, action): - talk_mask = action[:, 0] == self.talk_action - mask = torch.stack( - (torch.ones_like(talk_mask), talk_mask, 
talk_mask), - dim=1).detach() - - assert action.shape == mask.shape - - return mask - - def construct_final_action(self, action): - act_mask = action[:, 0] != self.n_primitive_actions - 1 - - nan_mask = np.array([ - np.array([1, np.nan, np.nan]) if t else np.array([np.nan, 1, 1]) for t in act_mask - ]) - - action = nan_mask*action - - return action - - def _get_embed_text(self, text): - _, hidden = self.text_rnn(self.word_embedding(text)) - return hidden[-1] - - def _get_embed_dialogue(self, dial): - _, hidden = self.dialogue_rnn(self.word_embedding(dial)) - return hidden[-1] \ No newline at end of file diff --git a/spaces/fspecii/midi-composer/app.py b/spaces/fspecii/midi-composer/app.py deleted file mode 100644 index 9a78d4ca2a91c8ec9ec4315dab3fe72d0a381aec..0000000000000000000000000000000000000000 --- a/spaces/fspecii/midi-composer/app.py +++ /dev/null @@ -1,291 +0,0 @@ -import argparse -import glob -import os.path - -import gradio as gr -import numpy as np -import onnxruntime as rt -import tqdm -import json -from huggingface_hub import hf_hub_download - -import MIDI -from midi_synthesizer import synthesis -from midi_tokenizer import MIDITokenizer - -in_space = os.getenv("SYSTEM") == "spaces" - - -def softmax(x, axis): - x_max = np.amax(x, axis=axis, keepdims=True) - exp_x_shifted = np.exp(x - x_max) - return exp_x_shifted / np.sum(exp_x_shifted, axis=axis, keepdims=True) - - -def sample_top_p_k(probs, p, k): - probs_idx = np.argsort(-probs, axis=-1) - probs_sort = np.take_along_axis(probs, probs_idx, -1) - probs_sum = np.cumsum(probs_sort, axis=-1) - mask = probs_sum - probs_sort > p - probs_sort[mask] = 0.0 - mask = np.zeros(probs_sort.shape[-1]) - mask[:k] = 1 - probs_sort = probs_sort * mask - probs_sort /= np.sum(probs_sort, axis=-1, keepdims=True) - shape = probs_sort.shape - probs_sort_flat = probs_sort.reshape(-1, shape[-1]) - probs_idx_flat = probs_idx.reshape(-1, shape[-1]) - next_token = np.stack([np.random.choice(idxs, p=pvals) for pvals, idxs in zip(probs_sort_flat, probs_idx_flat)]) - next_token = next_token.reshape(*shape[:-1]) - return next_token - - -def generate(model, prompt=None, max_len=512, temp=1.0, top_p=0.98, top_k=20, - disable_patch_change=False, disable_control_change=False, disable_channels=None): - if disable_channels is not None: - disable_channels = [tokenizer.parameter_ids["channel"][c] for c in disable_channels] - else: - disable_channels = [] - max_token_seq = tokenizer.max_token_seq - if prompt is None: - input_tensor = np.full((1, max_token_seq), tokenizer.pad_id, dtype=np.int64) - input_tensor[0, 0] = tokenizer.bos_id # bos - else: - prompt = prompt[:, :max_token_seq] - if prompt.shape[-1] < max_token_seq: - prompt = np.pad(prompt, ((0, 0), (0, max_token_seq - prompt.shape[-1])), - mode="constant", constant_values=tokenizer.pad_id) - input_tensor = prompt - input_tensor = input_tensor[None, :, :] - cur_len = input_tensor.shape[1] - bar = tqdm.tqdm(desc="generating", total=max_len - cur_len, disable=in_space) - with bar: - while cur_len < max_len: - end = False - hidden = model[0].run(None, {'x': input_tensor})[0][:, -1] - next_token_seq = np.empty((1, 0), dtype=np.int64) - event_name = "" - for i in range(max_token_seq): - mask = np.zeros(tokenizer.vocab_size, dtype=np.int64) - if i == 0: - mask_ids = list(tokenizer.event_ids.values()) + [tokenizer.eos_id] - if disable_patch_change: - mask_ids.remove(tokenizer.event_ids["patch_change"]) - if disable_control_change: - mask_ids.remove(tokenizer.event_ids["control_change"]) - mask[mask_ids] = 1 - else: 
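-                        # Past an event's first token, unmask only the ids that
-                        # are valid values of that event's next parameter; any
-                        # channels in `disable_channels` are filtered out below.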
- param_name = tokenizer.events[event_name][i - 1] - mask_ids = tokenizer.parameter_ids[param_name] - if param_name == "channel": - mask_ids = [i for i in mask_ids if i not in disable_channels] - mask[mask_ids] = 1 - logits = model[1].run(None, {'x': next_token_seq, "hidden": hidden})[0][:, -1:] - scores = softmax(logits / temp, -1) * mask - sample = sample_top_p_k(scores, top_p, top_k) - if i == 0: - next_token_seq = sample - eid = sample.item() - if eid == tokenizer.eos_id: - end = True - break - event_name = tokenizer.id_events[eid] - else: - next_token_seq = np.concatenate([next_token_seq, sample], axis=1) - if len(tokenizer.events[event_name]) == i: - break - if next_token_seq.shape[1] < max_token_seq: - next_token_seq = np.pad(next_token_seq, ((0, 0), (0, max_token_seq - next_token_seq.shape[-1])), - mode="constant", constant_values=tokenizer.pad_id) - next_token_seq = next_token_seq[None, :, :] - input_tensor = np.concatenate([input_tensor, next_token_seq], axis=1) - cur_len += 1 - bar.update(1) - yield next_token_seq.reshape(-1) - if end: - break - - -def create_msg(name, data): - return {"name": name, "data": data} - - -def run(model_name, tab, instruments, drum_kit, mid, midi_events, gen_events, temp, top_p, top_k, allow_cc): - mid_seq = [] - gen_events = int(gen_events) - max_len = gen_events - - disable_patch_change = False - disable_channels = None - if tab == 0: - i = 0 - mid = [[tokenizer.bos_id] + [tokenizer.pad_id] * (tokenizer.max_token_seq - 1)] - patches = {} - for instr in instruments: - patches[i] = patch2number[instr] - i = (i + 1) if i != 8 else 10 - if drum_kit != "None": - patches[9] = drum_kits2number[drum_kit] - for i, (c, p) in enumerate(patches.items()): - mid.append(tokenizer.event2tokens(["patch_change", 0, 0, i, c, p])) - mid_seq = mid - mid = np.asarray(mid, dtype=np.int64) - if len(instruments) > 0: - disable_patch_change = True - disable_channels = [i for i in range(16) if i not in patches] - elif mid is not None: - mid = tokenizer.tokenize(MIDI.midi2score(mid)) - mid = np.asarray(mid, dtype=np.int64) - mid = mid[:int(midi_events)] - max_len += len(mid) - for token_seq in mid: - mid_seq.append(token_seq.tolist()) - init_msgs = [create_msg("visualizer_clear", None)] - for tokens in mid_seq: - init_msgs.append(create_msg("visualizer_append", tokenizer.tokens2event(tokens))) - yield mid_seq, None, None, init_msgs - model = models[model_name] - generator = generate(model, mid, max_len=max_len, temp=temp, top_p=top_p, top_k=top_k, - disable_patch_change=disable_patch_change, disable_control_change=not allow_cc, - disable_channels=disable_channels) - for i, token_seq in enumerate(generator): - token_seq = token_seq.tolist() - mid_seq.append(token_seq) - event = tokenizer.tokens2event(token_seq) - yield mid_seq, None, None, [create_msg("visualizer_append", event), create_msg("progress", [i + 1, gen_events])] - mid = tokenizer.detokenize(mid_seq) - with open(f"output.mid", 'wb') as f: - f.write(MIDI.score2midi(mid)) - audio = synthesis(MIDI.score2opus(mid), soundfont_path) - yield mid_seq, "output.mid", (44100, audio), [create_msg("visualizer_end", None)] - - -def cancel_run(mid_seq): - if mid_seq is None: - return None, None - mid = tokenizer.detokenize(mid_seq) - with open(f"output.mid", 'wb') as f: - f.write(MIDI.score2midi(mid)) - audio = synthesis(MIDI.score2opus(mid), soundfont_path) - return "output.mid", (44100, audio), [create_msg("visualizer_end", None)] - - -def load_javascript(dir="javascript"): - scripts_list = glob.glob(f"{dir}/*.js") - javascript 
= "" - for path in scripts_list: - with open(path, "r", encoding="utf8") as jsfile: - javascript += f"\n" - template_response_ori = gr.routes.templates.TemplateResponse - - def template_response(*args, **kwargs): - res = template_response_ori(*args, **kwargs) - res.body = res.body.replace( - b'', f'{javascript}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - - -class JSMsgReceiver(gr.HTML): - - def __init__(self, **kwargs): - super().__init__(elem_id="msg_receiver", visible=False, **kwargs) - - def postprocess(self, y): - if y: - y = f"
<p style='display:none'>{json.dumps(y)}</p>
          " - return super().postprocess(y) - - def get_block_name(self) -> str: - return "html" - - -number2drum_kits = {-1: "None", 0: "Standard", 8: "Room", 16: "Power", 24: "Electric", 25: "TR-808", 32: "Jazz", - 40: "Blush", 48: "Orchestra"} -patch2number = {v: k for k, v in MIDI.Number2patch.items()} -drum_kits2number = {v: k for k, v in number2drum_kits.items()} - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - parser.add_argument("--port", type=int, default=7860, help="gradio server port") - parser.add_argument("--max-gen", type=int, default=1024, help="max") - opt = parser.parse_args() - soundfont_path = hf_hub_download(repo_id="skytnt/midi-model", filename="soundfont.sf2") - models_info = {"generic pretrain model": ["skytnt/midi-model", ""], - "j-pop finetune model": ["skytnt/midi-model-ft", "jpop/"], - "ambsd finetune model": ["fspecii/ambp", "ambsd/"]} - models = {} - tokenizer = MIDITokenizer() - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - for name, (repo_id, path) in models_info.items(): - model_base_path = hf_hub_download(repo_id=repo_id, filename=f"{path}onnx/model_base.onnx") - model_token_path = hf_hub_download(repo_id=repo_id, filename=f"{path}onnx/model_token.onnx") - model_base = rt.InferenceSession(model_base_path, providers=providers) - model_token = rt.InferenceSession(model_token_path, providers=providers) - models[name] = [model_base, model_token] - - load_javascript() - app = gr.Blocks() - with app: - gr.Markdown("
<h1>Midi Composer</h1>
          ") - gr.Markdown("![Visitors](https://api.visitorbadge.io/api/visitors?path=skytnt.midi-composer&style=flat)\n\n" - "Midi event transformer for music generation\n\n" - "Demo for [SkyTNT/midi-model](https://github.com/SkyTNT/midi-model)\n\n" - "[Open In Colab]" - "(https://colab.research.google.com/github/SkyTNT/midi-model/blob/main/demo.ipynb)" - " for faster running and longer generation" - ) - js_msg = JSMsgReceiver() - input_model = gr.Dropdown(label="select model", choices=list(models.keys()), - type="value", value=list(models.keys())[0]) - tab_select = gr.Variable(value=0) - with gr.Tabs(): - with gr.TabItem("instrument prompt") as tab1: - input_instruments = gr.Dropdown(label="instruments (auto if empty)", choices=list(patch2number.keys()), - multiselect=True, max_choices=15, type="value") - input_drum_kit = gr.Dropdown(label="drum kit", choices=list(drum_kits2number.keys()), type="value", - value="None") - example1 = gr.Examples([ - [[], "None"], - [["Acoustic Grand"], "None"], - [["Acoustic Grand", "Violin", "Viola", "Cello", "Contrabass"], "Orchestra"], - [["Flute", "Cello", "Bassoon", "Tuba"], "None"], - [["Violin", "Viola", "Cello", "Contrabass", "Trumpet", "French Horn", "Brass Section", - "Flute", "Piccolo", "Tuba", "Trombone", "Timpani"], "Orchestra"], - [["Acoustic Guitar(nylon)", "Acoustic Guitar(steel)", "Electric Guitar(jazz)", - "Electric Guitar(clean)", "Electric Guitar(muted)", "Overdriven Guitar", "Distortion Guitar", - "Electric Bass(finger)"], "Standard"] - ], [input_instruments, input_drum_kit]) - with gr.TabItem("midi prompt") as tab2: - input_midi = gr.File(label="input midi", file_types=[".midi", ".mid"], type="binary") - input_midi_events = gr.Slider(label="use first n midi events as prompt", minimum=1, maximum=512, - step=1, - value=128) - example2 = gr.Examples([[file, 128] for file in glob.glob("example/*.mid")], - [input_midi, input_midi_events]) - - tab1.select(lambda: 0, None, tab_select, queue=False) - tab2.select(lambda: 1, None, tab_select, queue=False) - input_gen_events = gr.Slider(label="generate n midi events", minimum=1, maximum=opt.max_gen, - step=1, value=opt.max_gen // 2) - with gr.Accordion("options", open=False): - input_temp = gr.Slider(label="temperature", minimum=0.1, maximum=1.2, step=0.01, value=1) - input_top_p = gr.Slider(label="top p", minimum=0.1, maximum=1, step=0.01, value=0.98) - input_top_k = gr.Slider(label="top k", minimum=1, maximum=20, step=1, value=12) - input_allow_cc = gr.Checkbox(label="allow midi cc event", value=True) - example3 = gr.Examples([[1, 0.98, 12], [1.2, 0.95, 8]], [input_temp, input_top_p, input_top_k]) - run_btn = gr.Button("generate", variant="primary") - stop_btn = gr.Button("stop and output") - output_midi_seq = gr.Variable() - output_midi_visualizer = gr.HTML(elem_id="midi_visualizer_container") - output_audio = gr.Audio(label="output audio", format="mp3", elem_id="midi_audio") - output_midi = gr.File(label="output midi", file_types=[".mid"]) - run_event = run_btn.click(run, [input_model, tab_select, input_instruments, input_drum_kit, input_midi, - input_midi_events, input_gen_events, input_temp, input_top_p, input_top_k, - input_allow_cc], - [output_midi_seq, output_midi, output_audio, js_msg]) - stop_btn.click(cancel_run, output_midi_seq, [output_midi, output_audio, js_msg], cancels=run_event, queue=False) - app.queue(2).launch(server_port=opt.port, share=opt.share, inbrowser=True) \ No newline at end of file diff --git a/spaces/g4f/freegpt-webui/g4f/Provider/Providers/Easychat.py 
b/spaces/g4f/freegpt-webui/g4f/Provider/Providers/Easychat.py deleted file mode 100644 index 389196cc3eb71161479d9fe70a7890579fa96ab8..0000000000000000000000000000000000000000 --- a/spaces/g4f/freegpt-webui/g4f/Provider/Providers/Easychat.py +++ /dev/null @@ -1,27 +0,0 @@ -import requests -import os -import json -from ...typing import sha256, Dict, get_type_hints - -url = 'https://free.easychat.work' -model = ['gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs): - headers = { - 'Content-Type': 'application/json', - } - data = { - 'model':model, - 'temperature': 0.7, - 'presence_penalty': 0, - 'messages': messages, - } - response = requests.post(url + '/api/openai/v1/chat/completions', - json=data, stream=stream) - - yield response.json()['choices'][0]['message']['content'] - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/gagan3012/IMD/images/readme.md b/spaces/gagan3012/IMD/images/readme.md deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/utils.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/utils.py deleted file mode 100644 index c5befb8e56ece50b5fecfd007b26f8a29124c0bd..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/utils.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os -import random -import sys -import time -import warnings -from getpass import getuser -from socket import gethostname - -import numpy as np -import torch - -import annotator.uniformer.mmcv as mmcv - - -def get_host_info(): - """Get hostname and username. - - Return empty string if exception raised, e.g. ``getpass.getuser()`` will - lead to error in docker container - """ - host = '' - try: - host = f'{getuser()}@{gethostname()}' - except Exception as e: - warnings.warn(f'Host or user not found: {str(e)}') - finally: - return host - - -def get_time_str(): - return time.strftime('%Y%m%d_%H%M%S', time.localtime()) - - -def obj_from_dict(info, parent=None, default_args=None): - """Initialize an object from dict. - - The dict must contain the key "type", which indicates the object type, it - can be either a string or type, such as "list" or ``list``. Remaining - fields are treated as the arguments for constructing the object. - - Args: - info (dict): Object types and arguments. - parent (:class:`module`): Module which may containing expected object - classes. - default_args (dict, optional): Default arguments for initializing the - object. - - Returns: - any type: Object built from the dict. 
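-
-        Example (illustrative sketch, not from the original file):
-            >>> import torch.nn as nn
-            >>> fc = obj_from_dict(dict(type='Linear', in_features=4, out_features=2), parent=nn)
-            >>> isinstance(fc, nn.Linear)
-            True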
- """ - assert isinstance(info, dict) and 'type' in info - assert isinstance(default_args, dict) or default_args is None - args = info.copy() - obj_type = args.pop('type') - if mmcv.is_str(obj_type): - if parent is not None: - obj_type = getattr(parent, obj_type) - else: - obj_type = sys.modules[obj_type] - elif not isinstance(obj_type, type): - raise TypeError('type must be a str or valid type, but ' - f'got {type(obj_type)}') - if default_args is not None: - for name, value in default_args.items(): - args.setdefault(name, value) - return obj_type(**args) - - -def set_random_seed(seed, deterministic=False, use_rank_shift=False): - """Set random seed. - - Args: - seed (int): Seed to be used. - deterministic (bool): Whether to set the deterministic option for - CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` - to True and `torch.backends.cudnn.benchmark` to False. - Default: False. - rank_shift (bool): Whether to add rank number to the random seed to - have different random seed in different threads. Default: False. - """ - if use_rank_shift: - rank, _ = mmcv.runner.get_dist_info() - seed += rank - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - os.environ['PYTHONHASHSEED'] = str(seed) - if deterministic: - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False diff --git a/spaces/gligen/demo/gligen/ldm/modules/distributions/__init__.py b/spaces/gligen/demo/gligen/ldm/modules/distributions/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/golem4300/RVC-TTS/README.md b/spaces/golem4300/RVC-TTS/README.md deleted file mode 100644 index 11f4ae415ea820a8c82c69da45fe909c104c5611..0000000000000000000000000000000000000000 --- a/spaces/golem4300/RVC-TTS/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: RVC TTS Demo -emoji: 🚀 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -license: gpl-3.0 -duplicated_from: ImPavloh/RVC-TTS-Demo ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/group2test/Protogen_x3.4_Official_Release/README.md b/spaces/group2test/Protogen_x3.4_Official_Release/README.md deleted file mode 100644 index 7c66bedad272898ce6070b23d187d56e7905508b..0000000000000000000000000000000000000000 --- a/spaces/group2test/Protogen_x3.4_Official_Release/README.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: 🤗 No-code Gradio Demo Tutorial -emoji: 💻 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false ---- - - -# 🤗 No-code Gradio Demo Tutorial - -This space contains a Gradio demo that was created **without typing any code**! -It enables you to generate photorealistic images based on a prompt. - -Here are the steps to take to create your own demo *in a few minutes* without any code: - -1. You can easily create a demo using [this HF space](https://huggingface.co/spaces/anzorq/sd-space-creator). You will have to provide a token with write access to continue as shown in the image below: - -

- [image: gradio-demo-tutorial-1]

          - -2. If your token is valid, a prompt will enable you to specify the URL or ID of the model on the HF Hub you would like to use in this demo. For example [darkstorm2150/Protogen_x3.4_Official_Release](https://huggingface.co/darkstorm2150/Protogen_x3.4_Official_Release), which is a diffusion model able to generate photorealistic images from a prompt. Click on "Load model". - -

- [image: gradio-demo-tutorial-2]

- -3. Then, you can specify a few details about your demo, such as: - - Name - - Title - - Description - - Space type - - Hardware (CPU? GPU?) -Click on "Create the space". Your space will now be built. - 

- [image: gradio-demo-tutorial-3]

          - -4. Once your space is built, you can reach it at https://huggingface.co/spaces/SPACE_NAME. - -5. You can enter a prompt to generate an image with the space you just built! The demo embedded in this space was created that way without typing any code! - -

- [image: gradio-demo-tutorial-5]
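For readers who do want code, the whole demo above boils down to a couple of lines of Gradio; a minimal sketch, assuming the model remains loadable through the Hub inference API (`gr.Interface.load` is the Gradio 3.x one-liner that other spaces in this same dump use):

```python
import gradio as gr

# The "models/" prefix tells Gradio to build the UI (prompt in, image out)
# directly from the hosted model's inference API.
gr.Interface.load("models/darkstorm2150/Protogen_x3.4_Official_Release").launch()
```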

          diff --git a/spaces/gtx4010661/dandelin-vilt-b32-finetuned-vqa/app.py b/spaces/gtx4010661/dandelin-vilt-b32-finetuned-vqa/app.py deleted file mode 100644 index 07f4abab652bc7ebf13f61160be67be837cae28d..0000000000000000000000000000000000000000 --- a/spaces/gtx4010661/dandelin-vilt-b32-finetuned-vqa/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/dandelin/vilt-b32-finetuned-vqa").launch() \ No newline at end of file diff --git a/spaces/gui-sparim/Calculadoras_DDA/app.py b/spaces/gui-sparim/Calculadoras_DDA/app.py deleted file mode 100644 index c8949943b13fe3230cfebd55c63b5acad25de957..0000000000000000000000000000000000000000 --- a/spaces/gui-sparim/Calculadoras_DDA/app.py +++ /dev/null @@ -1,23 +0,0 @@ -import numpy as np -import gradio as gr -import boxes -import convert - -bloco = gr.Blocks() - -with bloco: - with gr.Tabs(): - with gr.TabItem(boxes.title): - gr.Markdown(boxes.description) - inputs = boxes.load_inputs() - text_button = gr.Button("Calcular") - outputs = gr.TextArea(lines=1, label=boxes.output_label) - text_button.click(boxes.execute, inputs=inputs, outputs=outputs) - with gr.TabItem(convert.title): - gr.Markdown(convert.description) - inputs = convert.load_inputs() - text_button = gr.Button("Calcular") - outputs = gr.TextArea(lines=1, label=convert.output_label) - text_button.click(convert.execute, inputs=inputs, outputs=outputs) - -bloco.launch() \ No newline at end of file diff --git a/spaces/gundruke/ua-thesis-absa/app.py b/spaces/gundruke/ua-thesis-absa/app.py deleted file mode 100644 index 8a5033841f3778359a0f8b241669e3b1a88c46c3..0000000000000000000000000000000000000000 --- a/spaces/gundruke/ua-thesis-absa/app.py +++ /dev/null @@ -1,155 +0,0 @@ -import gradio as gr -import torch -import json -from nltk.corpus import wordnet -from transformers import AutoConfig, AutoTokenizer -from models import BERTLstmCRF -from huggingface_hub import hf_hub_download -import os -import nltk - -os.system("python -m nltk.downloader all") - -checkpoint = "gundruke/bert-lstm-crf-absa" -config = AutoConfig.from_pretrained(checkpoint) -id2label = config.id2label - -tokenizer = AutoTokenizer.from_pretrained("gundruke/bert-lstm-crf-absa") -model = BERTLstmCRF(config) - - -repo = "gundruke/bert-lstm-crf-absa" -filename = "pytorch_model.bin" -model.load_state_dict(torch.load(hf_hub_download(repo_id=repo, filename=filename), - map_location=torch.device('cpu'))) - -dictionary_file_path = hf_hub_download(repo_id=repo, filename="dictionary.json") - -def tokenize_text(text): - tokens = tokenizer.tokenize(text) - tokenized_text = tokenizer(text) - - return tokens, tokenized_text - - -def convert_to_multilabel(label_list): - multilabel = [] - if "B-POS" in label_list or "I-POS" in label_list: - multilabel.append("Positive") - if "B-NEG" in label_list or "I-NEG" in label_list: - multilabel.append("Negative") - if "B-NEU" in label_list or "I-NEU" in label_list: - multilabel.append("Neutral") - - return " and ".join(multilabel) - - -def classify_word(word, dictionary): - synsets = wordnet.synsets(word) - if synsets: - hypernyms = synsets[0].hypernyms() # Get the hypernym of the first synset - if hypernyms: - nltk_result = hypernyms[0].lemmas()[0].name() - else: - nltk_result = "Unknown" - else: - nltk_result = "Unknown" - - if word in dictionary: - result = dictionary[word] - elif nltk_result in ['atmosphere', 'drinks', 'food', 'price', 'service']: - result = nltk_result - else: - result = 'other' - - return result, nltk_result - - -def 
get_outputs(tokenized_text): - input_ids = tokenized_text["input_ids"] - token_type_ids = tokenized_text["token_type_ids"] - attention_mask = tokenized_text["attention_mask"] - - inputs = { - 'input_ids': torch.tensor([input_ids]), - 'token_type_ids': torch.tensor([token_type_ids]), - 'attention_mask': torch.tensor([attention_mask]) - } - - with torch.no_grad(): - outputs = model(**inputs) - - labels = [id2label.get(i) for i in torch.flatten(outputs[1]).tolist()][1:-1] - - return labels - - -def join_wordpieces(tokens, labels): - joined_tokens = [] - - for token, label in zip(tokens, labels): - if label == "O": - label = None - if token.startswith("##"): - last_token = joined_tokens[-1][0] - joined_tokens[-1] = (last_token+token[2:], label) - else: - joined_tokens.append((token, label)) - - return joined_tokens - - -def get_category(word, dict_file): - with open(dict_file, "r") as file: - dictionary = json.load(file) - - r, n = classify_word(word, dictionary) - - return r - - -def text_analysis(text): - tokens, tokenized_text = tokenize_text(text) - labels = get_outputs(tokenized_text) - multilabel = convert_to_multilabel(labels) - - token_tuple = join_wordpieces(tokens, labels) - tokenized_text["tokens"] = tokens - - categories = [] - for tok in token_tuple: - if tok[1]: - categories.append((tok[0], get_category(tok[0], dictionary_file_path))) - else: - categories.append((tok[0], None)) - - - - - return token_tuple, multilabel, categories - - -theme = gr.themes.Base() -with gr.Blocks(theme=theme) as demo: - with gr.Column(): - input_textbox = gr.Textbox(placeholder="Enter sentence here...") - btn = gr.Button("Submit", variant="primary") - - btn.click(fn=text_analysis, - inputs=input_textbox, - outputs=[gr.HighlightedText(label="Token labels"), - gr.Label(label="Multilabel classification"), - gr.HighlightedText(label="Category")], - queue=False) - - with gr.Column(): - examples=[ - ["I've been coming here as a child and always come back for the taste."], - ["The tea is great and all the sweets are homemade."], - ["Strong build which really adds to its durability but poor battery life."], - ["We loved the recommendation for the wine, and I think the eggplant parmigiana appetizer should become an entree."], - ["chicken pasta was tasty, wine was super nice but waiter was rude."] - ] - gr.Examples(examples, input_textbox) - -demo.launch(debug=True) diff --git a/spaces/h2oai/wave-tour/examples/ml_dai_autodoc.py b/spaces/h2oai/wave-tour/examples/ml_dai_autodoc.py deleted file mode 100644 index 24e5129b695f5eafb227af12d5b9dd92f7d565a9..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/ml_dai_autodoc.py +++ /dev/null @@ -1,156 +0,0 @@ -# WaveML / DAI / AutoDoc -# Download AutoDoc for Wave Models built using Driverless AI. -# --- -import os - -from h2o_wave import main, app, Q, copy_expando, ui -from h2o_wave_ml import build_model, ModelType -from h2o_wave_ml.utils import list_dai_instances, save_autodoc - -from sklearn.datasets import load_wine -from sklearn.model_selection import train_test_split - -STEAM_URL = os.environ.get('STEAM_URL') -MLOPS_URL = os.environ.get('MLOPS_URL') - -DATASET_TEXT = '''The sample dataset used is the - wine dataset.''' -STEAM_TEXT = f'''No Driverless AI instances available. 
You may create one in - AI Engines and refresh the page.''' - - -def dai_experiment_url(instance_id: str, instance_name: str): - # URL link to Driverless AI experiment - return f'''**Driverless AI Experiment:** - {instance_name}''' - - -def mlops_deployment_url(project_id: str): - # URL link to MLOps deployment - return f'**MLOps Deployment:** {project_id}' - - -def form_unsupported(): - # display when app is not running on cloud - return [ - ui.text('''This example requires access to Driverless AI running on - H2O AI Cloud - and does not support standalone app instances.'''), - ui.text('''Sign up at https://h2o.ai/free - to run apps on cloud.''') - ] - - -def form_default(q: Q): - # display when app is initialized - return [ - ui.text(content=DATASET_TEXT), - ui.dropdown(name='dai_instance_id', label='Select Driverless AI instance', value=q.client.dai_instance_id, - choices=q.client.choices_dai_instances, required=True), - ui.text(content=STEAM_TEXT, visible=q.client.disable_training), - ui.button(name='train', label='Train', primary=True, disabled=q.client.disable_training) - ] - - -def form_training_progress(q: Q): - # display when model training is in progress - return [ - ui.text(content=DATASET_TEXT), - ui.dropdown(name='dai_instance_id', label='Select Driverless AI instance', value=q.client.dai_instance_id, - choices=q.client.choices_dai_instances, required=True), - ui.button(name='train', label='Train', primary=True, disabled=q.client.disable_training), - ui.progress(label='Training in progress...', caption='This can take a few minutes...'), - ui.text(content=q.client.model_details) - ] - - -def form_training_completed(q: Q): - # display when model training is completed - return [ - ui.text(content=DATASET_TEXT), - ui.dropdown(name='dai_instance_id', label='Select Driverless AI instance', value=q.client.dai_instance_id, - choices=q.client.choices_dai_instances, required=True), - ui.button(name='train', label='Train', primary=True, disabled=q.client.disable_training), - ui.message_bar(type='success', text='Training successfully completed!'), - ui.text(content=q.client.model_details), - ui.text(content=f'**Download:** AutoDoc') - ] - - -@app('/demo') -async def serve(q: Q): - if 'H2O_CLOUD_ENVIRONMENT' not in os.environ: - # show appropriate message if app is not running on cloud - q.page['example'] = ui.form_card( - box='1 1 -1 -1', - items=form_unsupported() - ) - elif q.args.train: - # get DAI instance name - copy_expando(q.args, q.client) - - for dai_instance in q.client.dai_instances: - if dai_instance['id'] == int(q.client.dai_instance_id): - q.client.dai_instance_name = dai_instance['name'] - - # set DAI model details - q.client.model_details = dai_experiment_url(q.client.dai_instance_id, q.client.dai_instance_name) - - # show training progress and details - q.page['example'].items = form_training_progress(q) - await q.page.save() - - # train WaveML Model using Driverless AI - q.client.wave_model = await q.run( - func=build_model, - train_df=q.client.train_df, - target_column='target', - model_type=ModelType.DAI, - refresh_token=q.auth.refresh_token, - _steam_dai_instance_name=q.client.dai_instance_name, - _dai_accuracy=1, - _dai_time=1, - _dai_interpretability=10 - ) - - # update DAI model details - q.client.project_id = q.client.wave_model.project_id - q.client.model_details += f'
          {mlops_deployment_url(q.client.project_id)}' - - # download AutoDoc - path_autodoc = save_autodoc( - project_id=q.client.project_id, - output_dir_path='.', - refresh_token=q.auth.refresh_token - ) - - q.client.path_autodoc, *_ = await q.site.upload([path_autodoc]) - - # show model outputs - q.page['example'].items = form_training_completed(q) - else: - # prepare sample train and test dataframes - data = load_wine(as_frame=True)['frame'] - q.client.train_df, q.client.test_df = train_test_split(data, train_size=0.8) - - # DAI instances - q.client.dai_instances = list_dai_instances(refresh_token=q.auth.refresh_token) - q.client.choices_dai_instances = [ - ui.choice( - name=str(x['id']), - label=f'{x["name"]} ({x["status"].capitalize()})', - disabled=x['status'] != 'running' - ) for x in q.client.dai_instances - ] - - running_dai_instances = [x['id'] for x in q.client.dai_instances if x['status'] == 'running'] - q.client.disable_training = False if running_dai_instances else True - q.client.dai_instance_id = str(running_dai_instances[0]) if running_dai_instances else '' - - # display ui - q.page['example'] = ui.form_card( - box='1 1 -1 -1', - items=form_default(q) - ) - - await q.page.save() diff --git a/spaces/h2oai/wave-tour/examples/spinbox.py b/spaces/h2oai/wave-tour/examples/spinbox.py deleted file mode 100644 index 765807dacdc3ed6b7adf0b92576c3800ef7178df..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/spinbox.py +++ /dev/null @@ -1,23 +0,0 @@ -# Form / Spinbox -# Use a #spinbox to allow users to incrementally adjust a value in small steps. -# #form -# --- -from h2o_wave import main, app, Q, ui - - -@app('/demo') -async def serve(q: Q): - if q.args.show_inputs: - q.page['example'].items = [ - ui.text(f'spinbox={q.args.spinbox}'), - ui.text(f'spinbox_disabled={q.args.spinbox_disabled}'), - ui.button(name='show_form', label='Back', primary=True), - ] - else: - q.page['example'] = ui.form_card(box='1 1 4 5', items=[ - ui.spinbox(name='spinbox', label='Standard spinbox', min=0, max=100, step=10, value=30), - ui.spinbox(name='spinbox_disabled', label='Disabled spinbox', min=0, max=100, step=10, value=30, - disabled=True), - ui.button(name='show_inputs', label='Submit', primary=True), - ]) - await q.page.save() diff --git a/spaces/h2oai/wave-tour/examples/tall_info.py b/spaces/h2oai/wave-tour/examples/tall_info.py deleted file mode 100644 index 987733ae0e84ba5f9b30462b0ac4760c9a3e431d..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/tall_info.py +++ /dev/null @@ -1,24 +0,0 @@ -# Info / Tall -# Create a tall information card displaying a title, caption, and either an icon or image. 
-# --- -from h2o_wave import main, app, Q, ui - - -@app('/demo') -async def serve(q: Q): - if q.args.info_card: - q.page['example'] = ui.form_card(box='1 1 2 4', items=[ - ui.button(name='back', label='Go back', primary=True), - ]) - else: - q.page['example'] = ui.tall_info_card( - box='1 1 2 5', - name='info_card', - title='Info Card', - caption='Lorem ipsum dolor sit amet consectetur adipisicing elit.', - category='Category', - label='Click me', - image='https://images.pexels.com/photos/3225517/pexels-photo-3225517.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=750&w=1260', - ) - - await q.page.save() diff --git a/spaces/hanzportgas/rvc-models-v2/infer_pack/commons.py b/spaces/hanzportgas/rvc-models-v2/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/hanzportgas/rvc-models-v2/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = 
get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign.h b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign.h deleted file mode 100644 index 2d95eac6e29d5e5624afbc6c545776d78ebc709c..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign.h +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -#pragma once -#include - -namespace detectron2 { - -at::Tensor ROIAlign_forward_cpu( - const at::Tensor& input, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int sampling_ratio, - bool aligned); - -at::Tensor ROIAlign_backward_cpu( - const at::Tensor& grad, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int batch_size, - const int channels, - const int height, - const int width, - const int sampling_ratio, - bool aligned); - -#ifdef WITH_CUDA -at::Tensor ROIAlign_forward_cuda( - const at::Tensor& input, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int sampling_ratio, - bool aligned); - -at::Tensor ROIAlign_backward_cuda( - const at::Tensor& grad, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int batch_size, - const int channels, - const int height, - const int width, - const int sampling_ratio, - bool aligned); -#endif - -// Interface for Python -inline at::Tensor ROIAlign_forward( - const at::Tensor& input, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int sampling_ratio, - bool aligned) { - if (input.is_cuda()) { -#ifdef WITH_CUDA - return ROIAlign_forward_cuda( - input, - rois, - spatial_scale, - pooled_height, - pooled_width, - sampling_ratio, - aligned); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - return ROIAlign_forward_cpu( - input, - rois, - spatial_scale, - pooled_height, - pooled_width, - sampling_ratio, - aligned); -} - -inline at::Tensor ROIAlign_backward( - const at::Tensor& grad, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int batch_size, - const int channels, - const int height, - const int width, - const int sampling_ratio, - bool aligned) { - if (grad.is_cuda()) { -#ifdef WITH_CUDA - return ROIAlign_backward_cuda( - grad, - rois, - spatial_scale, - pooled_height, - pooled_width, - batch_size, - channels, - height, - width, - sampling_ratio, - aligned); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - return ROIAlign_backward_cpu( - grad, - rois, - spatial_scale, - pooled_height, - pooled_width, - batch_size, - channels, - height, - width, - sampling_ratio, - aligned); -} - -} // namespace detectron2 diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/modeling/roi_heads/roi_heads.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/modeling/roi_heads/roi_heads.py deleted file mode 100644 index f35588e474a1c3d938e5a3b2b8a8ae5e88006215..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/modeling/roi_heads/roi_heads.py +++ /dev/null @@ -1,812 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import inspect -import logging -import numpy as np -from typing import Dict, List, Optional, Tuple, Union -import torch -from torch import nn - -from detectron2.config import configurable -from detectron2.layers import ShapeSpec -from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou -from detectron2.utils.events import get_event_storage -from detectron2.utils.registry import Registry - -from ..backbone.resnet import BottleneckBlock, make_stage -from ..matcher import Matcher -from ..poolers import ROIPooler -from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals -from ..sampling import subsample_labels -from .box_head import build_box_head -from .fast_rcnn import FastRCNNOutputLayers -from .keypoint_head import build_keypoint_head -from .mask_head import build_mask_head - -ROI_HEADS_REGISTRY = Registry("ROI_HEADS") -ROI_HEADS_REGISTRY.__doc__ = """ -Registry for ROI heads in a generalized R-CNN model. -ROIHeads take feature maps and region proposals, and -perform per-region computation. - -The registered object will be called with `obj(cfg, input_shape)`. -The call is expected to return an :class:`ROIHeads`. -""" - -logger = logging.getLogger(__name__) - - -def build_roi_heads(cfg, input_shape): - """ - Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`. - """ - name = cfg.MODEL.ROI_HEADS.NAME - return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape) - - -def select_foreground_proposals( - proposals: List[Instances], bg_label: int -) -> Tuple[List[Instances], List[torch.Tensor]]: - """ - Given a list of N Instances (for N images), each containing a `gt_classes` field, - return a list of Instances that contain only instances with `gt_classes != -1 && - gt_classes != bg_label`. - - Args: - proposals (list[Instances]): A list of N Instances, where N is the number of - images in the batch. - bg_label: label index of background class. - - Returns: - list[Instances]: N Instances, each contains only the selected foreground instances. - list[Tensor]: N boolean vector, correspond to the selection mask of - each Instances object. True for selected instances. - """ - assert isinstance(proposals, (list, tuple)) - assert isinstance(proposals[0], Instances) - assert proposals[0].has("gt_classes") - fg_proposals = [] - fg_selection_masks = [] - for proposals_per_image in proposals: - gt_classes = proposals_per_image.gt_classes - fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label) - fg_idxs = fg_selection_mask.nonzero().squeeze(1) - fg_proposals.append(proposals_per_image[fg_idxs]) - fg_selection_masks.append(fg_selection_mask) - return fg_proposals, fg_selection_masks - - -def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]: - """ - Args: - proposals (list[Instances]): a list of N Instances, where N is the - number of images. - - Returns: - proposals: only contains proposals with at least one visible keypoint. - - Note that this is still slightly different from Detectron. - In Detectron, proposals for training keypoint head are re-sampled from - all the proposals with IOU>threshold & >=1 visible keypoint. - - Here, the proposals are first sampled from all proposals with - IOU>threshold, then proposals with no visible keypoint are filtered out. - This strategy seems to make no difference on Detectron and is easier to implement. 
- """ - ret = [] - all_num_fg = [] - for proposals_per_image in proposals: - # If empty/unannotated image (hard negatives), skip filtering for train - if len(proposals_per_image) == 0: - ret.append(proposals_per_image) - continue - gt_keypoints = proposals_per_image.gt_keypoints.tensor - # #fg x K x 3 - vis_mask = gt_keypoints[:, :, 2] >= 1 - xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1] - proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4 - kp_in_box = ( - (xs >= proposal_boxes[:, :, 0]) - & (xs <= proposal_boxes[:, :, 2]) - & (ys >= proposal_boxes[:, :, 1]) - & (ys <= proposal_boxes[:, :, 3]) - ) - selection = (kp_in_box & vis_mask).any(dim=1) - selection_idxs = torch.nonzero(selection, as_tuple=True)[0] - all_num_fg.append(selection_idxs.numel()) - ret.append(proposals_per_image[selection_idxs]) - - storage = get_event_storage() - storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg)) - return ret - - -class ROIHeads(torch.nn.Module): - """ - ROIHeads perform all per-region computation in an R-CNN. - - It typically contains logic to - 1. (in training only) match proposals with ground truth and sample them - 2. crop the regions and extract per-region features using proposals - 3. make per-region predictions with different heads - - It can have many variants, implemented as subclasses of this class. - This base class contains the logic to match/sample proposals. - But it is not necessary to inherit this class if the sampling logic is not needed. - """ - - @configurable - def __init__( - self, - *, - num_classes, - batch_size_per_image, - positive_sample_fraction, - proposal_matcher, - proposal_append_gt=True - ): - """ - NOTE: this interface is experimental. - - Args: - num_classes (int): number of classes. Used to label background proposals. - batch_size_per_image (int): number of proposals to use for training - positive_sample_fraction (float): fraction of positive (foreground) proposals - to use for training. - proposal_matcher (Matcher): matcher that matches proposals and ground truth - proposal_append_gt (bool): whether to include ground truth as proposals as well - """ - super().__init__() - self.batch_size_per_image = batch_size_per_image - self.positive_sample_fraction = positive_sample_fraction - self.num_classes = num_classes - self.proposal_matcher = proposal_matcher - self.proposal_append_gt = proposal_append_gt - - @classmethod - def from_config(cls, cfg): - return { - "batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, - "positive_sample_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION, - "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES, - "proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT, - # Matcher to assign box proposals to gt boxes - "proposal_matcher": Matcher( - cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS, - cfg.MODEL.ROI_HEADS.IOU_LABELS, - allow_low_quality_matches=False, - ), - } - - def _sample_proposals( - self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Based on the matching between N proposals and M groundtruth, - sample the proposals and set their classification labels. - - Args: - matched_idxs (Tensor): a vector of length N, each is the best-matched - gt index in [0, M) for each proposal. - matched_labels (Tensor): a vector of length N, the matcher's label - (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal. - gt_classes (Tensor): a vector of length M. 
- - Returns: - Tensor: a vector of indices of sampled proposals. Each is in [0, N). - Tensor: a vector of the same length, the classification label for - each sampled proposal. Each sample is labeled as either a category in - [0, num_classes) or the background (num_classes). - """ - has_gt = gt_classes.numel() > 0 - # Get the corresponding GT for each proposal - if has_gt: - gt_classes = gt_classes[matched_idxs] - # Label unmatched proposals (0 label from matcher) as background (label=num_classes) - gt_classes[matched_labels == 0] = self.num_classes - # Label ignore proposals (-1 label) - gt_classes[matched_labels == -1] = -1 - else: - gt_classes = torch.zeros_like(matched_idxs) + self.num_classes - - sampled_fg_idxs, sampled_bg_idxs = subsample_labels( - gt_classes, self.batch_size_per_image, self.positive_sample_fraction, self.num_classes - ) - - sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0) - return sampled_idxs, gt_classes[sampled_idxs] - - @torch.no_grad() - def label_and_sample_proposals( - self, proposals: List[Instances], targets: List[Instances] - ) -> List[Instances]: - """ - Prepare some proposals to be used to train the ROI heads. - It performs box matching between `proposals` and `targets`, and assigns - training labels to the proposals. - It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth - boxes, with a fraction of positives that is no larger than - ``self.positive_sample_fraction``. - - Args: - See :meth:`ROIHeads.forward` - - Returns: - list[Instances]: - length `N` list of `Instances`s containing the proposals - sampled for training. Each `Instances` has the following fields: - - - proposal_boxes: the proposal boxes - - gt_boxes: the ground-truth box that the proposal is assigned to - (this is only meaningful if the proposal has a label > 0; if label = 0 - then the ground-truth box is random) - - Other fields such as "gt_classes", "gt_masks", that's included in `targets`. - """ - gt_boxes = [x.gt_boxes for x in targets] - # Augment proposals with ground-truth boxes. - # In the case of learned proposals (e.g., RPN), when training starts - # the proposals will be low quality due to random initialization. - # It's possible that none of these initial - # proposals have high enough overlap with the gt objects to be used - # as positive examples for the second stage components (box head, - # cls head, mask head). Adding the gt boxes to the set of proposals - # ensures that the second stage components will have some positive - # examples from the start of training. For RPN, this augmentation improves - # convergence and empirically improves box AP on COCO by about 0.5 - # points (under one tested configuration). 
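Before reading the loop below, it can help to see the match-and-label recipe from the docstring above in isolation. A self-contained sketch, deliberately simplified to a single IoU threshold (detectron2's Matcher also supports an ignore band, and subsample_labels additionally enforces the positive fraction); all box values are hypothetical (x1, y1, x2, y2) tensors:

import torch

def match_and_label(gt_boxes, gt_classes, proposals, iou_thresh=0.5, num_classes=80):
    # Pairwise IoU between M ground-truth boxes and N proposals.
    area_gt = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])
    area_pr = (proposals[:, 2] - proposals[:, 0]) * (proposals[:, 3] - proposals[:, 1])
    lt = torch.max(gt_boxes[:, None, :2], proposals[None, :, :2])  # M x N x 2
    rb = torch.min(gt_boxes[:, None, 2:], proposals[None, :, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    iou = inter / (area_gt[:, None] + area_pr[None, :] - inter)    # M x N
    best_iou, matched_idxs = iou.max(dim=0)       # best ground truth per proposal
    labels = gt_classes[matched_idxs].clone()
    labels[best_iou < iou_thresh] = num_classes   # low overlap becomes background
    return matched_idxs, labels

gt = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
cls = torch.tensor([3])
props = torch.tensor([[1.0, 1.0, 9.0, 9.0], [50.0, 50.0, 60.0, 60.0]])
print(match_and_label(gt, cls, props))  # proposal 0 keeps class 3, proposal 1 becomes 80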
- if self.proposal_append_gt: - proposals = add_ground_truth_to_proposals(gt_boxes, proposals) - - proposals_with_gt = [] - - num_fg_samples = [] - num_bg_samples = [] - for proposals_per_image, targets_per_image in zip(proposals, targets): - has_gt = len(targets_per_image) > 0 - match_quality_matrix = pairwise_iou( - targets_per_image.gt_boxes, proposals_per_image.proposal_boxes - ) - matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix) - sampled_idxs, gt_classes = self._sample_proposals( - matched_idxs, matched_labels, targets_per_image.gt_classes - ) - - # Set target attributes of the sampled proposals: - proposals_per_image = proposals_per_image[sampled_idxs] - proposals_per_image.gt_classes = gt_classes - - # We index all the attributes of targets that start with "gt_" - # and have not been added to proposals yet (="gt_classes"). - if has_gt: - sampled_targets = matched_idxs[sampled_idxs] - # NOTE: here the indexing wastes some compute, because heads - # like masks, keypoints, etc, will filter the proposals again, - # (by foreground/background, or number of keypoints in the image, etc) - # so we essentially index the data twice. - for (trg_name, trg_value) in targets_per_image.get_fields().items(): - if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name): - proposals_per_image.set(trg_name, trg_value[sampled_targets]) - else: - gt_boxes = Boxes( - targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4)) - ) - proposals_per_image.gt_boxes = gt_boxes - - num_bg_samples.append((gt_classes == self.num_classes).sum().item()) - num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1]) - proposals_with_gt.append(proposals_per_image) - - # Log the number of fg/bg samples that are selected for training ROI heads - storage = get_event_storage() - storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) - storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) - - return proposals_with_gt - - def forward( - self, - images: ImageList, - features: Dict[str, torch.Tensor], - proposals: List[Instances], - targets: Optional[List[Instances]] = None, - ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]: - """ - Args: - images (ImageList): - features (dict[str,Tensor]): input data as a mapping from feature - map name to tensor. Axis 0 represents the number of images `N` in - the input data; axes 1-3 are channels, height, and width, which may - vary between feature maps (e.g., if a feature pyramid is used). - proposals (list[Instances]): length `N` list of `Instances`. The i-th - `Instances` contains object proposals for the i-th input image, - with fields "proposal_boxes" and "objectness_logits". - targets (list[Instances], optional): length `N` list of `Instances`. The i-th - `Instances` contains the ground-truth per-instance annotations - for the i-th input image. Specify `targets` during training only. - It may have the following fields: - - - gt_boxes: the bounding box of each instance. - - gt_classes: the label for each instance with a category ranging in [0, #class]. - - gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance. - - gt_keypoints: NxKx3, the ground-truth keypoints for each instance. - - Returns: - list[Instances]: length `N` list of `Instances` containing the - detected instances. Returned during inference only; may be [] during training. - - dict[str->Tensor]: - mapping from a named loss to a tensor storing the loss. Used during training only.
- """ - raise NotImplementedError() - - -@ROI_HEADS_REGISTRY.register() -class Res5ROIHeads(ROIHeads): - """ - The ROIHeads in a typical "C4" R-CNN model, where - the box and mask head share the cropping and - the per-region feature computation by a Res5 block. - """ - - def __init__(self, cfg, input_shape): - super().__init__(cfg) - - # fmt: off - self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES - pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION - pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE - pooler_scales = (1.0 / input_shape[self.in_features[0]].stride, ) - sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO - self.mask_on = cfg.MODEL.MASK_ON - # fmt: on - assert not cfg.MODEL.KEYPOINT_ON - assert len(self.in_features) == 1 - - self.pooler = ROIPooler( - output_size=pooler_resolution, - scales=pooler_scales, - sampling_ratio=sampling_ratio, - pooler_type=pooler_type, - ) - - self.res5, out_channels = self._build_res5_block(cfg) - self.box_predictor = FastRCNNOutputLayers( - cfg, ShapeSpec(channels=out_channels, height=1, width=1) - ) - - if self.mask_on: - self.mask_head = build_mask_head( - cfg, - ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution), - ) - - def _build_res5_block(self, cfg): - # fmt: off - stage_channel_factor = 2 ** 3 # res5 is 8x res2 - num_groups = cfg.MODEL.RESNETS.NUM_GROUPS - width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP - bottleneck_channels = num_groups * width_per_group * stage_channel_factor - out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor - stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 - norm = cfg.MODEL.RESNETS.NORM - assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \ - "Deformable conv is not yet supported in res5 head." - # fmt: on - - blocks = make_stage( - BottleneckBlock, - 3, - first_stride=2, - in_channels=out_channels // 2, - bottleneck_channels=bottleneck_channels, - out_channels=out_channels, - num_groups=num_groups, - norm=norm, - stride_in_1x1=stride_in_1x1, - ) - return nn.Sequential(*blocks), out_channels - - def _shared_roi_transform(self, features, boxes): - x = self.pooler(features, boxes) - return self.res5(x) - - def forward(self, images, features, proposals, targets=None): - """ - See :meth:`ROIHeads.forward`. - """ - del images - - if self.training: - assert targets - proposals = self.label_and_sample_proposals(proposals, targets) - del targets - - proposal_boxes = [x.proposal_boxes for x in proposals] - box_features = self._shared_roi_transform( - [features[f] for f in self.in_features], proposal_boxes - ) - predictions = self.box_predictor(box_features.mean(dim=[2, 3])) - - if self.training: - del features - losses = self.box_predictor.losses(predictions, proposals) - if self.mask_on: - proposals, fg_selection_masks = select_foreground_proposals( - proposals, self.num_classes - ) - # Since the ROI feature transform is shared between boxes and masks, - # we don't need to recompute features. The mask loss is only defined - # on foreground proposals, so we need to select out the foreground - # features. 
- mask_features = box_features[torch.cat(fg_selection_masks, dim=0)] - del box_features - losses.update(self.mask_head(mask_features, proposals)) - return [], losses - else: - pred_instances, _ = self.box_predictor.inference(predictions, proposals) - pred_instances = self.forward_with_given_boxes(features, pred_instances) - return pred_instances, {} - - def forward_with_given_boxes(self, features, instances): - """ - Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. - - Args: - features: same as in `forward()` - instances (list[Instances]): instances to predict other outputs. Expect the keys - "pred_boxes" and "pred_classes" to exist. - - Returns: - instances (Instances): - the same `Instances` object, with extra - fields such as `pred_masks` or `pred_keypoints`. - """ - assert not self.training - assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") - - if self.mask_on: - features = [features[f] for f in self.in_features] - x = self._shared_roi_transform(features, [x.pred_boxes for x in instances]) - return self.mask_head(x, instances) - else: - return instances - - -@ROI_HEADS_REGISTRY.register() -class StandardROIHeads(ROIHeads): - """ - It's "standard" in the sense that there is no ROI transform sharing - or feature sharing between tasks. - Each head independently processes the input features by each head's - own pooler and head. - - This class is used by most models, such as FPN and C5. - To implement more models, you can subclass it and implement a different - :meth:`forward()` or a head. - """ - - @configurable - def __init__( - self, - *, - box_in_features: List[str], - box_pooler: ROIPooler, - box_head: nn.Module, - box_predictor: nn.Module, - mask_in_features: Optional[List[str]] = None, - mask_pooler: Optional[ROIPooler] = None, - mask_head: Optional[nn.Module] = None, - keypoint_in_features: Optional[List[str]] = None, - keypoint_pooler: Optional[ROIPooler] = None, - keypoint_head: Optional[nn.Module] = None, - train_on_pred_boxes: bool = False, - **kwargs - ): - """ - NOTE: this interface is experimental. - - Args: - box_in_features (list[str]): list of feature names to use for the box head. - box_pooler (ROIPooler): pooler to extract region features for box head - box_head (nn.Module): transform features to make box predictions - box_predictor (nn.Module): make box predictions from the feature. - Should have the same interface as :class:`FastRCNNOutputLayers`. - mask_in_features (list[str]): list of feature names to use for the mask head. - None if not using mask head. - mask_pooler (ROIPooler): pooler to extract region features for mask head - mask_head (nn.Module): transform features to make mask predictions - keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask*``. - train_on_pred_boxes (bool): whether to use proposal boxes or - predicted boxes from the box head to train other heads.
- """ - super().__init__(**kwargs) - # keep self.in_features for backward compatibility - self.in_features = self.box_in_features = box_in_features - self.box_pooler = box_pooler - self.box_head = box_head - self.box_predictor = box_predictor - - self.mask_on = mask_in_features is not None - if self.mask_on: - self.mask_in_features = mask_in_features - self.mask_pooler = mask_pooler - self.mask_head = mask_head - self.keypoint_on = keypoint_in_features is not None - if self.keypoint_on: - self.keypoint_in_features = keypoint_in_features - self.keypoint_pooler = keypoint_pooler - self.keypoint_head = keypoint_head - - self.train_on_pred_boxes = train_on_pred_boxes - - @classmethod - def from_config(cls, cfg, input_shape): - ret = super().from_config(cfg) - ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES - # Subclasses that have not been updated to use from_config style construction - # may have overridden _init_*_head methods. In this case, those overridden methods - # will not be classmethods and we need to avoid trying to call them here. - # We test for this with ismethod which only returns True for bound methods of cls. - # Such subclasses will need to handle calling their overridden _init_*_head methods. - if inspect.ismethod(cls._init_box_head): - ret.update(cls._init_box_head(cfg, input_shape)) - if inspect.ismethod(cls._init_mask_head): - ret.update(cls._init_mask_head(cfg, input_shape)) - if inspect.ismethod(cls._init_keypoint_head): - ret.update(cls._init_keypoint_head(cfg, input_shape)) - return ret - - @classmethod - def _init_box_head(cls, cfg, input_shape): - # fmt: off - in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES - pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION - pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) - sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO - pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE - # fmt: on - - # If StandardROIHeads is applied on multiple feature maps (as in FPN), - # then we share the same predictors and therefore the channel counts must be the same - in_channels = [input_shape[f].channels for f in in_features] - # Check all channel counts are equal - assert len(set(in_channels)) == 1, in_channels - in_channels = in_channels[0] - - box_pooler = ROIPooler( - output_size=pooler_resolution, - scales=pooler_scales, - sampling_ratio=sampling_ratio, - pooler_type=pooler_type, - ) - # Here we split "box head" and "box predictor", which is mainly due to historical reasons. - # They are used together so the "box predictor" layers should be part of the "box head". - # New subclasses of ROIHeads do not need "box predictor"s. 
- box_head = build_box_head( - cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) - ) - box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape) - return { - "box_in_features": in_features, - "box_pooler": box_pooler, - "box_head": box_head, - "box_predictor": box_predictor, - } - - @classmethod - def _init_mask_head(cls, cfg, input_shape): - if not cfg.MODEL.MASK_ON: - return {} - # fmt: off - in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES - pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION - pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) - sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO - pooler_type = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE - # fmt: on - - in_channels = [input_shape[f].channels for f in in_features][0] - - ret = {"mask_in_features": in_features} - ret["mask_pooler"] = ROIPooler( - output_size=pooler_resolution, - scales=pooler_scales, - sampling_ratio=sampling_ratio, - pooler_type=pooler_type, - ) - ret["mask_head"] = build_mask_head( - cfg, ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution) - ) - return ret - - @classmethod - def _init_keypoint_head(cls, cfg, input_shape): - if not cfg.MODEL.KEYPOINT_ON: - return {} - # fmt: off - in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES - pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION - pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) # noqa - sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO - pooler_type = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE - # fmt: on - - in_channels = [input_shape[f].channels for f in in_features][0] - - ret = {"keypoint_in_features": in_features} - ret["keypoint_pooler"] = ROIPooler( - output_size=pooler_resolution, - scales=pooler_scales, - sampling_ratio=sampling_ratio, - pooler_type=pooler_type, - ) - ret["keypoint_head"] = build_keypoint_head( - cfg, ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution) - ) - return ret - - def forward( - self, - images: ImageList, - features: Dict[str, torch.Tensor], - proposals: List[Instances], - targets: Optional[List[Instances]] = None, - ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]: - """ - See :class:`ROIHeads.forward`. - """ - del images - if self.training: - assert targets - proposals = self.label_and_sample_proposals(proposals, targets) - del targets - - if self.training: - losses = self._forward_box(features, proposals) - # Usually the original proposals used by the box head are used by the mask, keypoint - # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes - # predicted by the box head. - losses.update(self._forward_mask(features, proposals)) - losses.update(self._forward_keypoint(features, proposals)) - return proposals, losses - else: - pred_instances = self._forward_box(features, proposals) - # During inference cascaded prediction is used: the mask and keypoints heads are only - # applied to the top scoring box detections. - pred_instances = self.forward_with_given_boxes(features, pred_instances) - return pred_instances, {} - - def forward_with_given_boxes( - self, features: Dict[str, torch.Tensor], instances: List[Instances] - ) -> List[Instances]: - """ - Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. - - This is useful for downstream tasks where a box is known, but need to obtain - other attributes (outputs of other heads). - Test-time augmentation also uses this. 
- - Args: - features: same as in `forward()` - instances (list[Instances]): instances to predict other outputs. Expect the keys - "pred_boxes" and "pred_classes" to exist. - - Returns: - instances (list[Instances]): - the same `Instances` objects, with extra - fields such as `pred_masks` or `pred_keypoints`. - """ - assert not self.training - assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") - - instances = self._forward_mask(features, instances) - instances = self._forward_keypoint(features, instances) - return instances - - def _forward_box( - self, features: Dict[str, torch.Tensor], proposals: List[Instances] - ) -> Union[Dict[str, torch.Tensor], List[Instances]]: - """ - Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`, - the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument. - - Args: - features (dict[str, Tensor]): mapping from feature map names to tensor. - Same as in :meth:`ROIHeads.forward`. - proposals (list[Instances]): the per-image object proposals with - their matching ground truth. - Each has fields "proposal_boxes", and "objectness_logits", - "gt_classes", "gt_boxes". - - Returns: - In training, a dict of losses. - In inference, a list of `Instances`, the predicted instances. - """ - features = [features[f] for f in self.box_in_features] - box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) - box_features = self.box_head(box_features) - predictions = self.box_predictor(box_features) - del box_features - - if self.training: - losses = self.box_predictor.losses(predictions, proposals) - # proposals is modified in-place below, so losses must be computed first. - if self.train_on_pred_boxes: - with torch.no_grad(): - pred_boxes = self.box_predictor.predict_boxes_for_gt_classes( - predictions, proposals - ) - for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes): - proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image) - return losses - else: - pred_instances, _ = self.box_predictor.inference(predictions, proposals) - return pred_instances - - def _forward_mask( - self, features: Dict[str, torch.Tensor], instances: List[Instances] - ) -> Union[Dict[str, torch.Tensor], List[Instances]]: - """ - Forward logic of the mask prediction branch. - - Args: - features (dict[str, Tensor]): mapping from feature map names to tensor. - Same as in :meth:`ROIHeads.forward`. - instances (list[Instances]): the per-image instances to train/predict masks. - In training, they can be the proposals. - In inference, they can be the predicted boxes. - - Returns: - In training, a dict of losses. - In inference, update `instances` with new fields "pred_masks" and return it. - """ - if not self.mask_on: - return {} if self.training else instances - - features = [features[f] for f in self.mask_in_features] - - if self.training: - # The loss is only defined on positive proposals. 
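The filter invoked on the next line, select_foreground_proposals (defined near the top of this file), boils down to one boolean expression over gt_classes. A worked example with bg_label = 80 (a COCO-sized value chosen purely for illustration):

import torch

gt_classes = torch.tensor([2, 80, -1, 5, 80])   # 80 = background, -1 = ignored
bg_label = 80
fg_mask = (gt_classes != -1) & (gt_classes != bg_label)
print(fg_mask)                       # tensor([ True, False, False,  True, False])
print(fg_mask.nonzero().squeeze(1))  # tensor([0, 3]): only these rows enter the mask loss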
- proposals, _ = select_foreground_proposals(instances, self.num_classes) - proposal_boxes = [x.proposal_boxes for x in proposals] - mask_features = self.mask_pooler(features, proposal_boxes) - return self.mask_head(mask_features, proposals) - else: - pred_boxes = [x.pred_boxes for x in instances] - mask_features = self.mask_pooler(features, pred_boxes) - return self.mask_head(mask_features, instances) - - def _forward_keypoint( - self, features: Dict[str, torch.Tensor], instances: List[Instances] - ) -> Union[Dict[str, torch.Tensor], List[Instances]]: - """ - Forward logic of the keypoint prediction branch. - - Args: - features (dict[str, Tensor]): mapping from feature map names to tensor. - Same as in :meth:`ROIHeads.forward`. - instances (list[Instances]): the per-image instances to train/predict keypoints. - In training, they can be the proposals. - In inference, they can be the predicted boxes. - - Returns: - In training, a dict of losses. - In inference, update `instances` with new fields "pred_keypoints" and return it. - """ - if not self.keypoint_on: - return {} if self.training else instances - - features = [features[f] for f in self.keypoint_in_features] - - if self.training: - # The loss is defined on positive proposals with >=1 visible keypoints. - proposals, _ = select_foreground_proposals(instances, self.num_classes) - proposals = select_proposals_with_visible_keypoints(proposals) - proposal_boxes = [x.proposal_boxes for x in proposals] - - keypoint_features = self.keypoint_pooler(features, proposal_boxes) - return self.keypoint_head(keypoint_features, proposals) - else: - pred_boxes = [x.pred_boxes for x in instances] - keypoint_features = self.keypoint_pooler(features, pred_boxes) - return self.keypoint_head(keypoint_features, instances) diff --git a/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/python/dqn/dqn.py b/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/python/dqn/dqn.py deleted file mode 100644 index 6cea64d39baa7ff4c1e549869aaa4b0ae17779a9..0000000000000000000000000000000000000000 --- a/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/python/dqn/dqn.py +++ /dev/null @@ -1,245 +0,0 @@ -from typing import Any, Dict, List, Optional, Tuple, Type, Union - -import gym -import numpy as np -import torch as th -from torch.nn import functional as F - -from stable_baselines3.common import logger -from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm -from stable_baselines3.common.preprocessing import maybe_transpose -from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule -from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update -from stable_baselines3.dqn.policies import DQNPolicy - - -class DQN(OffPolicyAlgorithm): - """ - Deep Q-Network (DQN) - - Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236 - Default hyperparameters are taken from the nature paper, - except for the optimizer and learning rate that were taken from Stable Baselines defaults. - - :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) 
- :param env: The environment to learn from (if registered in Gym, can be str) - :param learning_rate: The learning rate, it can be a function - of the current progress remaining (from 1 to 0) - :param buffer_size: size of the replay buffer - :param learning_starts: how many steps of the model to collect transitions for before learning starts - :param batch_size: Minibatch size for each gradient update - :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update - :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit - like ``(5, "step")`` or ``(2, "episode")``. - :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) - Set to ``-1`` means to do as many gradient steps as steps done in the environment - during the rollout. - :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer - at a cost of more complexity. - See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 - :param target_update_interval: update the target network every ``target_update_interval`` - environment steps. - :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced - :param exploration_initial_eps: initial value of random action probability - :param exploration_final_eps: final value of random action probability - :param max_grad_norm: The maximum value for the gradient clipping - :param tensorboard_log: the log location for tensorboard (if None, no logging) - :param create_eval_env: Whether to create a second environment that will be - used for evaluating the agent periodically. (Only available when passing string for the environment) - :param policy_kwargs: additional arguments to be passed to the policy on creation - :param verbose: the verbosity level: 0 no output, 1 info, 2 debug - :param seed: Seed for the pseudo random generators - :param device: Device (cpu, cuda, ...) on which the code should be run. - Setting it to auto, the code will be run on the GPU if possible. 
- :param _init_setup_model: Whether or not to build the network at the creation of the instance - """ - - def __init__( - self, - policy: Union[str, Type[DQNPolicy]], - env: Union[GymEnv, str], - learning_rate: Union[float, Schedule] = 1e-4, - buffer_size: int = 1000000, - learning_starts: int = 50000, - batch_size: Optional[int] = 32, - tau: float = 1.0, - gamma: float = 0.99, - train_freq: Union[int, Tuple[int, str]] = 4, - gradient_steps: int = 1, - optimize_memory_usage: bool = False, - target_update_interval: int = 10000, - exploration_fraction: float = 0.1, - exploration_initial_eps: float = 1.0, - exploration_final_eps: float = 0.05, - max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, - create_eval_env: bool = False, - policy_kwargs: Optional[Dict[str, Any]] = None, - verbose: int = 0, - seed: Optional[int] = None, - device: Union[th.device, str] = "auto", - _init_setup_model: bool = True, - ): - - super(DQN, self).__init__( - policy, - env, - DQNPolicy, - learning_rate, - buffer_size, - learning_starts, - batch_size, - tau, - gamma, - train_freq, - gradient_steps, - action_noise=None, # No action noise - policy_kwargs=policy_kwargs, - tensorboard_log=tensorboard_log, - verbose=verbose, - device=device, - create_eval_env=create_eval_env, - seed=seed, - sde_support=False, - optimize_memory_usage=optimize_memory_usage, - supported_action_spaces=(gym.spaces.Discrete,), - ) - - self.exploration_initial_eps = exploration_initial_eps - self.exploration_final_eps = exploration_final_eps - self.exploration_fraction = exploration_fraction - self.target_update_interval = target_update_interval - self.max_grad_norm = max_grad_norm - # "epsilon" for the epsilon-greedy exploration - self.exploration_rate = 0.0 - # Linear schedule will be defined in `_setup_model()` - self.exploration_schedule = None - self.q_net, self.q_net_target = None, None - - if _init_setup_model: - self._setup_model() - - def _setup_model(self) -> None: - super(DQN, self)._setup_model() - self._create_aliases() - self.exploration_schedule = get_linear_fn( - self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction - ) - - def _create_aliases(self) -> None: - self.q_net = self.policy.q_net - self.q_net_target = self.policy.q_net_target - - def _on_step(self) -> None: - """ - Update the exploration rate and target network if needed. - This method is called in ``collect_rollouts()`` after each step in the environment. 
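The _on_step body that follows refreshes the target network with polyak_update. A minimal sketch of that update rule (the real implementation lives in stable_baselines3.common.utils and is written to avoid extra temporaries): target parameters move toward the online ones by a factor tau, and DQN's default tau=1.0 degenerates into the classic hard copy.

import torch

def polyak_update_sketch(params, target_params, tau: float) -> None:
    # target <- tau * online + (1 - tau) * target
    with torch.no_grad():
        for p, tp in zip(params, target_params):
            tp.data.mul_(1.0 - tau).add_(tau * p.data)

online, target = torch.nn.Linear(4, 2), torch.nn.Linear(4, 2)
polyak_update_sketch(online.parameters(), target.parameters(), tau=1.0)
assert torch.equal(online.weight, target.weight)  # tau=1.0 reproduces a hard update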
- """ - if self.num_timesteps % self.target_update_interval == 0: - polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau) - - self.exploration_rate = self.exploration_schedule(self._current_progress_remaining) - logger.record("rollout/exploration rate", self.exploration_rate) - - def train(self, gradient_steps: int, batch_size: int = 100) -> None: - # Update learning rate according to schedule - self._update_learning_rate(self.policy.optimizer) - - losses = [] - for _ in range(gradient_steps): - # Sample replay buffer - replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) - - with th.no_grad(): - # Compute the next Q-values using the target network - next_q_values = self.q_net_target(replay_data.next_observations) - # Follow greedy policy: use the one with the highest value - next_q_values, _ = next_q_values.max(dim=1) - # Avoid potential broadcast issue - next_q_values = next_q_values.reshape(-1, 1) - # 1-step TD target - target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values - - # Get current Q-values estimates - current_q_values = self.q_net(replay_data.observations) - - # Retrieve the q-values for the actions from the replay buffer - current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long()) - - # Compute Huber loss (less sensitive to outliers) - loss = F.smooth_l1_loss(current_q_values, target_q_values) - losses.append(loss.item()) - - # Optimize the policy - self.policy.optimizer.zero_grad() - loss.backward() - # Clip gradient norm - th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) - self.policy.optimizer.step() - - # Increase update counter - self._n_updates += gradient_steps - - logger.record("train/n_updates", self._n_updates, exclude="tensorboard") - logger.record("train/loss", np.mean(losses)) - - def predict( - self, - observation: np.ndarray, - state: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, - deterministic: bool = False, - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: - """ - Overrides the base_class predict function to include epsilon-greedy exploration. - - :param observation: the input observation - :param state: The last states (can be None, used in recurrent policies) - :param mask: The last masks (can be None, used in recurrent policies) - :param deterministic: Whether or not to return deterministic actions. 
- :return: the model's action and the next state - (used in recurrent policies) - """ - if not deterministic and np.random.rand() < self.exploration_rate: - if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space): - n_batch = observation.shape[0] - action = np.array([self.action_space.sample() for _ in range(n_batch)]) - else: - action = np.array(self.action_space.sample()) - else: - action, state = self.policy.predict(observation, state, mask, deterministic) - return action, state - - def learn( - self, - total_timesteps: int, - callback: MaybeCallback = None, - log_interval: int = 4, - eval_env: Optional[GymEnv] = None, - eval_freq: int = -1, - n_eval_episodes: int = 5, - tb_log_name: str = "DQN", - eval_log_path: Optional[str] = None, - reset_num_timesteps: bool = True, - ) -> OffPolicyAlgorithm: - - return super(DQN, self).learn( - total_timesteps=total_timesteps, - callback=callback, - log_interval=log_interval, - eval_env=eval_env, - eval_freq=eval_freq, - n_eval_episodes=n_eval_episodes, - tb_log_name=tb_log_name, - eval_log_path=eval_log_path, - reset_num_timesteps=reset_num_timesteps, - ) - - def _excluded_save_params(self) -> List[str]: - return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"] - - def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: - state_dicts = ["policy", "policy.optimizer"] - - return state_dicts, [] diff --git a/spaces/huggan/cityscapes-pix2pix/README.md b/spaces/huggan/cityscapes-pix2pix/README.md deleted file mode 100644 index 7d338b1587e3ae8542f6deb882abce6b5114a397..0000000000000000000000000000000000000000 --- a/spaces/huggan/cityscapes-pix2pix/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Cityscapes Pix2pix -emoji: 🏢 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 2.9.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/hysts-samples/space-monitor/settings.py b/spaces/hysts-samples/space-monitor/settings.py deleted file mode 100644 index 24726c749e72c1f8c675a42209f783a420dea120..0000000000000000000000000000000000000000 --- a/spaces/hysts-samples/space-monitor/settings.py +++ /dev/null @@ -1,3 +0,0 @@ -import os - -MAX_NUM = int(os.getenv("MAX_NUM", "200")) diff --git a/spaces/inamXcontru/PoeticTTS/Al Ruqyah Al Shariah Full By Sheikh Idrees Abkar Listen and Download the Complete Islamic Exorcism.md b/spaces/inamXcontru/PoeticTTS/Al Ruqyah Al Shariah Full By Sheikh Idrees Abkar Listen and Download the Complete Islamic Exorcism.md deleted file mode 100644 index 37678f87721b7787da52e8889301b67c469d1032..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Al Ruqyah Al Shariah Full By Sheikh Idrees Abkar Listen and Download the Complete Islamic Exorcism.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Al Ruqyah Al Shariah Full By Sheikh Idrees Abkar


Download Zip: https://gohhs.com/2uz3MY



          -
          -
          -
          -

          diff --git a/spaces/inamXcontru/PoeticTTS/And Once Again 2 Full Movie HD 1080p Free Download Utorrent Movies What You Need to Know Before You Watch.md b/spaces/inamXcontru/PoeticTTS/And Once Again 2 Full Movie HD 1080p Free Download Utorrent Movies What You Need to Know Before You Watch.md deleted file mode 100644 index 3485e348820fb79fcd9acba0f44575aa60a26e17..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/And Once Again 2 Full Movie HD 1080p Free Download Utorrent Movies What You Need to Know Before You Watch.md +++ /dev/null @@ -1,6 +0,0 @@ -

          And Once Again 2 full movie hd 1080p free download utorrent movies


Download File: https://gohhs.com/2uz4SY



          -
          -
          -

          diff --git a/spaces/inamXcontru/PoeticTTS/Consumer Behavior Buying Having and Being 11th Edition PDF Download The Ultimate Resource for Marketers.md b/spaces/inamXcontru/PoeticTTS/Consumer Behavior Buying Having and Being 11th Edition PDF Download The Ultimate Resource for Marketers.md deleted file mode 100644 index a4efa3c9cd3036c95bceed42e7ba3839a461d554..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Consumer Behavior Buying Having and Being 11th Edition PDF Download The Ultimate Resource for Marketers.md +++ /dev/null @@ -1,6 +0,0 @@ -

          consumer behavior buying having and being 11th edition pdf download


Download Zip: https://gohhs.com/2uz3Xa



          -
          -
          -

          diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Beograd Veciti Grad Knjiga Pdf _VERIFIED_ Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Beograd Veciti Grad Knjiga Pdf _VERIFIED_ Download.md deleted file mode 100644 index 8db1283123ee69e07d8b86a6503f7770830b5929..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Beograd Veciti Grad Knjiga Pdf _VERIFIED_ Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Beograd Veciti Grad Knjiga Pdf Download


          DOWNLOAD ===> https://urlin.us/2uEyCh



          -
-Find more similar flip PDFs like BraneKnjiga2.Latinica: "Nije ovo mala knjiga". Download BraneKnjiga2.Latinica: "Nije ovo mala knjiga" PDF for ...
          -
          -
          -

          diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Darksiders 2.dsl Error ((INSTALL)).md b/spaces/inplisQlawa/anything-midjourney-v4-1/Darksiders 2.dsl Error ((INSTALL)).md deleted file mode 100644 index eabdeb00d3a7b8f2c7e310a42bef8582ac946230..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Darksiders 2.dsl Error ((INSTALL)).md +++ /dev/null @@ -1,9 +0,0 @@ -
          -

When you click the install button, you need to wait over 30 minutes for the installer to download the data for Darksiders 2. The installer also prompts you to restart your computer, which is not really required; nothing bad will happen if you simply restart after the installer finishes downloading. You will be able to play Darksiders 2 after that.

          -

Hi, my daughter just ran the Darksiders 2 setup, and her computer ran into problems. I tried to run the online update check, but it said it could not update because I am not connected; I will have to try again when I get home. I have run the setup on my daughter's computer before, and it has never had any problems.

          -

          Darksiders 2.dsl Error


Download Zip: https://urlin.us/2uEx8L



          -

I'm getting the same error. It goes through all the menus and seems to run fine, but in the middle it drops to a black screen with white text that says "Ver.5.3.0" over and over again. I've tried restarting my computer repeatedly, to no avail. Any suggestions?

          -

I have the same problem, and it's a big one, since my game doesn't work properly. The first game I got was Darksiders 2; I finished it more than a month ago. I downloaded the game again, but I still have this problem. We tried to contact Darksiders 2 support, but unfortunately they were not available at the time.

          -

Hello. Please help me; I'm really stuck on the Darksiders 2 installation. After installation, the program doesn't start. My antivirus is up to date, so I'm sure it's not a virus. The installation completes correctly, and the demo of the game works properly. I set the virtual RAM to 64 MB. When I start the installation, this error appears: "DC for DX5 is required". We have Windows 10, and the DirectX version checks out up to version 9.0. Please help me.

          -
          -
          \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Autocad Inventor 2013 With !!TOP!! Crack Download.md b/spaces/inreVtussa/clothingai/Examples/Autocad Inventor 2013 With !!TOP!! Crack Download.md deleted file mode 100644 index 4e17b5044bbe31fb952df2ad3faddfd12d990265..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Autocad Inventor 2013 With !!TOP!! Crack Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Autocad Inventor 2013 With Crack Download


          Download File 🆓 https://tiurll.com/2uCkvV



          -
-January 15, 2019 - Connect, consult, and hire trusted industry experts on the Autodesk Services Marketplace. Find vendors for Inventor. Follow Autodesk. What will happen on January 15, 2019? The Autodesk Services Marketplace will take place on January 15, 2019 at 12:00 PM, where we will tell you about our new services and capabilities, including job postings for engineers. Join us today, January 15, 2019 at 12:30 pm BST.
          -
          -
          -

          diff --git a/spaces/inreVtussa/clothingai/Examples/Autodesk AutoCAD 2017 Full Keygen !!INSTALL!! X64.md b/spaces/inreVtussa/clothingai/Examples/Autodesk AutoCAD 2017 Full Keygen !!INSTALL!! X64.md deleted file mode 100644 index 03985b8c8b9d25c96099c78bd1eee21bba4f9ec4..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Autodesk AutoCAD 2017 Full Keygen !!INSTALL!! X64.md +++ /dev/null @@ -1,29 +0,0 @@ -
          -

          Autodesk AutoCAD 2017 Full Keygen X64: A Powerful and Versatile Design Software

          - -

If you are looking for professional and reliable software for creating 2D and 3D designs, you should consider Autodesk AutoCAD 2017 Full Keygen X64. This is the latest version of the popular AutoCAD software, which has been used by millions of engineers, architects, and designers around the world. In this article, we will review some of the key features and benefits of Autodesk AutoCAD 2017 Full Keygen X64, and show you how to download and install it on your computer.

          - -

          Key Features of Autodesk AutoCAD 2017 Full Keygen X64

          - -

          Autodesk AutoCAD 2017 Full Keygen X64 offers many improvements and enhancements over the previous versions of AutoCAD. Here are some of the most notable ones:

          -

          Autodesk AutoCAD 2017 Full Keygen X64


Download File: https://tiurll.com/2uCiTE



          - -
            -
          • PDF Import: One of the most requested features by AutoCAD users is the ability to import PDF files as AutoCAD objects. Now you can easily import geometry, text, and raster images from a PDF file or underlay into your current drawing[^2^]. This will save you time and effort when working with PDF documents.
          • -
          • Smart Centerlines and Center Marks: Another useful feature of Autodesk AutoCAD 2017 Full Keygen X64 is the smart centerlines and center marks. These are dynamic objects that automatically update when you move or modify the associated objects. You can also customize their appearance and behavior to suit your preferences[^2^].
          • -
          • Share Design View: With Autodesk AutoCAD 2017 Full Keygen X64, you can easily share your design views with other stakeholders without sending your drawing files. You can publish your views to the cloud and generate a link that can be accessed by anyone with an internet connection. You can also control who can view, comment, or download your views[^2^].
          • -
          • 3D Printing: If you want to turn your 3D models into physical objects, you can use Autodesk AutoCAD 2017 Full Keygen X64 to prepare them for 3D printing. You can check for errors, optimize your mesh, and send your model to a 3D printer or an online service[^2^].
          • -
          • User Interface: Autodesk AutoCAD 2017 Full Keygen X64 has a modern and intuitive user interface that makes it easy to access the tools and commands you need. You can customize the ribbon, tool palettes, workspaces, and menus to suit your workflow. You can also use the command line or the new dialog boxes for more options[^2^].
          • -
          - -

          Benefits of Autodesk AutoCAD 2017 Full Keygen X64

          - -

          Autodesk AutoCAD 2017 Full Keygen X64 is not only a powerful and versatile software, but also a beneficial one for your productivity and creativity. Here are some of the benefits you can enjoy by using it:

          - -
            -
          • Accuracy: Autodesk AutoCAD 2017 Full Keygen X64 allows you to create precise and detailed drawings with accuracy and consistency. You can use various tools and features to ensure that your dimensions, annotations, coordinates, layers, blocks, and other elements are accurate and up to date.
          • -
          • Flexibility: Autodesk AutoCAD 2017 Full Keygen X64 gives you the flexibility to work with any type of design project, whether it is architectural, mechanical, electrical, civil, or anything else. You can also work with different file formats, such as DWG, DXF, DWF, PDF, JPG, PNG, etc.
          • -
          • Creativity: Autodesk AutoCAD 2017 Full Keygen X64 enables you to unleash your creativity and express your ideas in 2D and 3D. You can use various tools and features to create realistic and stunning visuals, such as materials, lighting, shadows, textures, colors, etc.
          • -
• Collaboration: Autodesk AutoCAD 2017 Full Keygen X64 facilitates collaboration and communication with other team members and clients. You can use various tools and features to share your designs online or offline, such as cloud services, email attachments, social media platforms, etc.
            -
            -
            \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/BAPCo Mobilemark 2012.rar.md b/spaces/inreVtussa/clothingai/Examples/BAPCo Mobilemark 2012.rar.md deleted file mode 100644 index b79d58b8e793a83154e18e0bfd36adad69f5b45c..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/BAPCo Mobilemark 2012.rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

            BAPCo mobilemark 2012.rar


            DOWNLOAD ::: https://tiurll.com/2uCikJ



            -
            -
            -

            diff --git a/spaces/iricardoxd/chat_spanish/README.md b/spaces/iricardoxd/chat_spanish/README.md deleted file mode 100644 index f181df633d50b58adee64a243026ce4db3ed858e..0000000000000000000000000000000000000000 --- a/spaces/iricardoxd/chat_spanish/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Chat Voice Spanish -emoji: 👀 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -license: gpl -duplicated_from: iricardoxd/chat_voice_spanish ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/ms_deform_attn.py b/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/ms_deform_attn.py deleted file mode 100644 index 489d501bef364020212306d81e9b85c8daa27491..0000000000000000000000000000000000000000 --- a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/ms_deform_attn.py +++ /dev/null @@ -1,413 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------------------ -# Modified from: -# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/functions/ms_deform_attn_func.py -# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py -# https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/multi_scale_deform_attn.py -# ------------------------------------------------------------------------------------------------ - -import math -import warnings -from typing import Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.init import constant_, xavier_uniform_ - -try: - from groundingdino import _C -except: - warnings.warn("Failed to load custom C++ ops. 
Running on CPU mode Only!") - - -# helpers -def _is_power_of_2(n): - if (not isinstance(n, int)) or (n < 0): - raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n))) - return (n & (n - 1) == 0) and n != 0 - - -class MultiScaleDeformableAttnFunction(Function): - @staticmethod - def forward( - ctx, - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - im2col_step, - ): - ctx.im2col_step = im2col_step - output = _C.ms_deform_attn_forward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - ctx.im2col_step, - ) - ctx.save_for_backward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - ) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - ( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - ) = ctx.saved_tensors - grad_value, grad_sampling_loc, grad_attn_weight = _C.ms_deform_attn_backward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - grad_output, - ctx.im2col_step, - ) - - return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None - - -def multi_scale_deformable_attn_pytorch( - value: torch.Tensor, - value_spatial_shapes: torch.Tensor, - sampling_locations: torch.Tensor, - attention_weights: torch.Tensor, -) -> torch.Tensor: - - bs, _, num_heads, embed_dims = value.shape - _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape - value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1) - sampling_grids = 2 * sampling_locations - 1 - sampling_value_list = [] - for level, (H_, W_) in enumerate(value_spatial_shapes): - # bs, H_*W_, num_heads, embed_dims -> - # bs, H_*W_, num_heads*embed_dims -> - # bs, num_heads*embed_dims, H_*W_ -> - # bs*num_heads, embed_dims, H_, W_ - value_l_ = ( - value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_) - ) - # bs, num_queries, num_heads, num_points, 2 -> - # bs, num_heads, num_queries, num_points, 2 -> - # bs*num_heads, num_queries, num_points, 2 - sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1) - # bs*num_heads, embed_dims, num_queries, num_points - sampling_value_l_ = F.grid_sample( - value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False - ) - sampling_value_list.append(sampling_value_l_) - # (bs, num_queries, num_heads, num_levels, num_points) -> - # (bs, num_heads, num_queries, num_levels, num_points) -> - # (bs, num_heads, 1, num_queries, num_levels*num_points) - attention_weights = attention_weights.transpose(1, 2).reshape( - bs * num_heads, 1, num_queries, num_levels * num_points - ) - output = ( - (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) - .sum(-1) - .view(bs, num_heads * embed_dims, num_queries) - ) - return output.transpose(1, 2).contiguous() - - -class MultiScaleDeformableAttention(nn.Module): - """Multi-Scale Deformable Attention Module used in Deformable-DETR - - `Deformable DETR: Deformable Transformers for End-to-End Object Detection. - `_. - - Args: - embed_dim (int): The embedding dimension of Attention. Default: 256. - num_heads (int): The number of attention heads. Default: 8. - num_levels (int): The number of feature map used in Attention. Default: 4. 
- num_points (int): The number of sampling points for each query - in each head. Default: 4. - img2col_steps (int): The step used in image_to_column. Defualt: 64. - dropout (float): Dropout layer used in output. Default: 0.1. - batch_first (bool): if ``True``, then the input and output tensor will be - provided as `(bs, n, embed_dim)`. Default: False. `(n, bs, embed_dim)` - """ - - def __init__( - self, - embed_dim: int = 256, - num_heads: int = 8, - num_levels: int = 4, - num_points: int = 4, - img2col_step: int = 64, - batch_first: bool = False, - ): - super().__init__() - if embed_dim % num_heads != 0: - raise ValueError( - "embed_dim must be divisible by num_heads, but got {} and {}".format( - embed_dim, num_heads - ) - ) - head_dim = embed_dim // num_heads - - self.batch_first = batch_first - - if not _is_power_of_2(head_dim): - warnings.warn( - """ - You'd better set d_model in MSDeformAttn to make sure that - each dim of the attention head a power of 2, which is more efficient. - """ - ) - - self.im2col_step = img2col_step - self.embed_dim = embed_dim - self.num_heads = num_heads - self.num_levels = num_levels - self.num_points = num_points - self.sampling_offsets = nn.Linear(embed_dim, num_heads * num_levels * num_points * 2) - self.attention_weights = nn.Linear(embed_dim, num_heads * num_levels * num_points) - self.value_proj = nn.Linear(embed_dim, embed_dim) - self.output_proj = nn.Linear(embed_dim, embed_dim) - - self.init_weights() - - def _reset_parameters(self): - return self.init_weights() - - def init_weights(self): - """ - Default initialization for Parameters of Module. - """ - constant_(self.sampling_offsets.weight.data, 0.0) - thetas = torch.arange(self.num_heads, dtype=torch.float32) * ( - 2.0 * math.pi / self.num_heads - ) - grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) - grid_init = ( - (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) - .view(self.num_heads, 1, 1, 2) - .repeat(1, self.num_levels, self.num_points, 1) - ) - for i in range(self.num_points): - grid_init[:, :, i, :] *= i + 1 - with torch.no_grad(): - self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) - constant_(self.attention_weights.weight.data, 0.0) - constant_(self.attention_weights.bias.data, 0.0) - xavier_uniform_(self.value_proj.weight.data) - constant_(self.value_proj.bias.data, 0.0) - xavier_uniform_(self.output_proj.weight.data) - constant_(self.output_proj.bias.data, 0.0) - - def freeze_sampling_offsets(self): - print("Freeze sampling offsets") - self.sampling_offsets.weight.requires_grad = False - self.sampling_offsets.bias.requires_grad = False - - def freeze_attention_weights(self): - print("Freeze attention weights") - self.attention_weights.weight.requires_grad = False - self.attention_weights.bias.requires_grad = False - - def forward( - self, - query: torch.Tensor, - key: Optional[torch.Tensor] = None, - value: Optional[torch.Tensor] = None, - query_pos: Optional[torch.Tensor] = None, - key_padding_mask: Optional[torch.Tensor] = None, - reference_points: Optional[torch.Tensor] = None, - spatial_shapes: Optional[torch.Tensor] = None, - level_start_index: Optional[torch.Tensor] = None, - **kwargs - ) -> torch.Tensor: - - """Forward Function of MultiScaleDeformableAttention - - Args: - query (torch.Tensor): Query embeddings with shape - `(num_query, bs, embed_dim)` - key (torch.Tensor): Key embeddings with shape - `(num_key, bs, embed_dim)` - value (torch.Tensor): Value embeddings with shape - `(num_key, bs, embed_dim)` - query_pos (torch.Tensor): The position 
embedding for `query`. Default: None.
-            key_padding_mask (torch.Tensor): ByteTensor for `query`, with shape `(bs, num_key)`,
-                indicating which elements within `key` should be ignored in attention.
-            reference_points (torch.Tensor): The normalized reference points with shape
-                `(bs, num_query, num_levels, 2)`; all elements are in the range [0, 1],
-                with (0, 0) at the top-left and (1, 1) at the bottom-right,
-                including the padding area. Alternatively `(N, Length_{query}, num_levels, 4)`,
-                which adds two extra dimensions `(w, h)` to form reference boxes.
-            spatial_shapes (torch.Tensor): Spatial shape of features in different levels.
-                With shape `(num_levels, 2)`, last dimension represents `(h, w)`.
-            level_start_index (torch.Tensor): The start index of each level. A tensor with
-                shape `(num_levels, )` which can be represented as
-                `[0, h_0 * w_0, h_0 * w_0 + h_1 * w_1, ...]`.
-
-        Returns:
-            torch.Tensor: forward results with shape `(num_query, bs, embed_dim)`
-                (or `(bs, num_query, embed_dim)` when `batch_first` is True)
-        """
-
-        if value is None:
-            value = query
-
-        if query_pos is not None:
-            query = query + query_pos
-
-        if not self.batch_first:
-            # change to (bs, num_query, embed_dims)
-            query = query.permute(1, 0, 2)
-            value = value.permute(1, 0, 2)
-
-        bs, num_query, _ = query.shape
-        bs, num_value, _ = value.shape
-
-        assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
-
-        value = self.value_proj(value)
-        if key_padding_mask is not None:
-            value = value.masked_fill(key_padding_mask[..., None], float(0))
-        value = value.view(bs, num_value, self.num_heads, -1)
-        sampling_offsets = self.sampling_offsets(query).view(
-            bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
-        )
-        attention_weights = self.attention_weights(query).view(
-            bs, num_query, self.num_heads, self.num_levels * self.num_points
-        )
-        attention_weights = attention_weights.softmax(-1)
-        attention_weights = attention_weights.view(
-            bs,
-            num_query,
-            self.num_heads,
-            self.num_levels,
-            self.num_points,
-        )
-
-        # bs, num_query, num_heads, num_levels, num_points, 2
-        if reference_points.shape[-1] == 2:
-            offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
-            sampling_locations = (
-                reference_points[:, :, None, :, None, :]
-                + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
-            )
-        elif reference_points.shape[-1] == 4:
-            sampling_locations = (
-                reference_points[:, :, None, :, None, :2]
-                + sampling_offsets
-                / self.num_points
-                * reference_points[:, :, None, :, None, 2:]
-                * 0.5
-            )
-        else:
-            raise ValueError(
-                "Last dim of reference_points must be 2 or 4, but got {} instead.".format(
-                    reference_points.shape[-1]
-                )
-            )
-
-        if torch.cuda.is_available() and value.is_cuda:
-            halffloat = False
-            if value.dtype == torch.float16:
-                halffloat = True
-                value = value.float()
-                sampling_locations = sampling_locations.float()
-                attention_weights = attention_weights.float()
-
-            output = MultiScaleDeformableAttnFunction.apply(
-                value,
-                spatial_shapes,
-                level_start_index,
-                sampling_locations,
-                attention_weights,
-                self.im2col_step,
-            )
-
-            if halffloat:
-                output = output.half()
-        else:
-            output = multi_scale_deformable_attn_pytorch(
-                value, spatial_shapes, sampling_locations, attention_weights
-            )
-
-        output = self.output_proj(output)
-
-        if not self.batch_first:
-            output = output.permute(1, 0, 2)
-
-        return output
-
-
-def create_dummy_class(klass, dependency, message=""):
-    """
-    When a dependency of a class is not available, create a dummy class which throws ImportError
-    when used.
-
-    Args:
-        klass (str): name of the class.
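With the attention module now fully defined, a minimal CPU usage sketch follows; on CPU tensors, `forward` takes the pure-PyTorch fallback path. All sizes below are arbitrary, and the class is assumed importable from the module being removed:

```python
import torch

# Self-attention over two flattened feature maps; value defaults to query.
attn = MultiScaleDeformableAttention(
    embed_dim=256, num_heads=8, num_levels=2, num_points=4, batch_first=True
)
spatial_shapes = torch.tensor([[8, 8], [4, 4]])
level_start_index = torch.tensor([0, 64])
bs, num_query = 2, 80  # num_query must equal 8*8 + 4*4 (asserted in forward)

query = torch.rand(bs, num_query, 256)
reference_points = torch.rand(bs, num_query, 2, 2)  # normalized (x, y) per level

out = attn(
    query,
    reference_points=reference_points,
    spatial_shapes=spatial_shapes,
    level_start_index=level_start_index,
)
assert out.shape == (bs, num_query, 256)
```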
- dependency (str): name of the dependency. - message: extra message to print - Returns: - class: a class object - """ - err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, klass) - if message: - err = err + " " + message - - class _DummyMetaClass(type): - # throw error on class attribute access - def __getattr__(_, __): # noqa: B902 - raise ImportError(err) - - class _Dummy(object, metaclass=_DummyMetaClass): - # throw error on constructor - def __init__(self, *args, **kwargs): - raise ImportError(err) - - return _Dummy - - -def create_dummy_func(func, dependency, message=""): - """ - When a dependency of a function is not available, create a dummy function which throws - ImportError when used. - - Args: - func (str): name of the function. - dependency (str or list[str]): name(s) of the dependency. - message: extra message to print - Returns: - function: a function object - """ - err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func) - if message: - err = err + " " + message - - if isinstance(dependency, (list, tuple)): - dependency = ",".join(dependency) - - def _dummy(*args, **kwargs): - raise ImportError(err) - - return _dummy diff --git a/spaces/jackyccl/segment-anything/groundingdino/util/misc.py b/spaces/jackyccl/segment-anything/groundingdino/util/misc.py deleted file mode 100644 index d64b84ef24bea0c98e76824feb1903f6bfebe7a5..0000000000000000000000000000000000000000 --- a/spaces/jackyccl/segment-anything/groundingdino/util/misc.py +++ /dev/null @@ -1,717 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Misc functions, including distributed helpers. - -Mostly copy-paste from torchvision references. -""" -import colorsys -import datetime -import functools -import io -import json -import os -import pickle -import subprocess -import time -from collections import OrderedDict, defaultdict, deque -from typing import List, Optional - -import numpy as np -import torch -import torch.distributed as dist - -# needed due to empty tensor bug in pytorch and torchvision 0.5 -import torchvision -from torch import Tensor - -__torchvision_need_compat_flag = float(torchvision.__version__.split(".")[1]) < 7 -if __torchvision_need_compat_flag: - from torchvision.ops import _new_empty_tensor - from torchvision.ops.misc import _output_size - - -class SmoothedValue(object): - """Track a series of values and provide access to smoothed values over a - window or the global series average. - """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! 
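A usage sketch for the dummy-object helpers defined above; `some_optional_pkg` and `FancyModel` are hypothetical names used only for illustration:

```python
# Hypothetical guard for an optional dependency using create_dummy_class.
try:
    from some_optional_pkg import FancyModel
except ImportError:
    FancyModel = create_dummy_class("FancyModel", "some_optional_pkg")

try:
    FancyModel()  # raises a descriptive ImportError when the package is missing
except ImportError as e:
    print(e)  # Cannot import 'some_optional_pkg', therefore 'FancyModel' is not available.
```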
- """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda") - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - if d.shape[0] == 0: - return 0 - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - if os.environ.get("SHILONG_AMP", None) == "1": - eps = 1e-4 - else: - eps = 1e-6 - return self.total / (self.count + eps) - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, - avg=self.avg, - global_avg=self.global_avg, - max=self.max, - value=self.value, - ) - - -@functools.lru_cache() -def _get_global_gloo_group(): - """ - Return a process group based on gloo backend, containing all the ranks - The result is cached. - """ - - if dist.get_backend() == "nccl": - return dist.new_group(backend="gloo") - - return dist.group.WORLD - - -def all_gather_cpu(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - - world_size = get_world_size() - if world_size == 1: - return [data] - - cpu_group = _get_global_gloo_group() - - buffer = io.BytesIO() - torch.save(data, buffer) - data_view = buffer.getbuffer() - device = "cuda" if cpu_group is None else "cpu" - tensor = torch.ByteTensor(data_view).to(device) - - # obtain Tensor size of each rank - local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long) - size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)] - if cpu_group is None: - dist.all_gather(size_list, local_size) - else: - print("gathering on cpu") - dist.all_gather(size_list, local_size, group=cpu_group) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - assert isinstance(local_size.item(), int) - local_size = int(local_size.item()) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device)) - if local_size != max_size: - padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=device) - tensor = torch.cat((tensor, padding), dim=0) - if cpu_group is None: - dist.all_gather(tensor_list, tensor) - else: - dist.all_gather(tensor_list, tensor, group=cpu_group) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - tensor = torch.split(tensor, [size, max_size - size], dim=0)[0] - buffer = io.BytesIO(tensor.cpu().numpy()) - obj = torch.load(buffer) - data_list.append(obj) - - return data_list - - -def all_gather(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - - if os.getenv("CPU_REDUCE") == "1": - return all_gather_cpu(data) - - world_size = get_world_size() - if world_size == 1: - return [data] - - # serialized to a Tensor - buffer = pickle.dumps(data) - storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to("cuda") - 
- # obtain Tensor size of each rank - local_size = torch.tensor([tensor.numel()], device="cuda") - size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] - dist.all_gather(size_list, local_size) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) - if local_size != max_size: - padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") - tensor = torch.cat((tensor, padding), dim=0) - dist.all_gather(tensor_list, tensor) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - - return data_list - - -def reduce_dict(input_dict, average=True): - """ - Args: - input_dict (dict): all the values will be reduced - average (bool): whether to do average or sum - Reduce the values in the dictionary from all processes so that all processes - have the averaged results. Returns a dict with the same fields as - input_dict, after reduction. - """ - world_size = get_world_size() - if world_size < 2: - return input_dict - with torch.no_grad(): - names = [] - values = [] - # sort the keys so that they are consistent across processes - for k in sorted(input_dict.keys()): - names.append(k) - values.append(input_dict[k]) - values = torch.stack(values, dim=0) - dist.all_reduce(values) - if average: - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} - return reduced_dict - - -class MetricLogger(object): - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr)) - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - # print(name, str(meter)) - # import ipdb;ipdb.set_trace() - if meter.count > 0: - loss_str.append("{}: {}".format(name, str(meter))) - return self.delimiter.join(loss_str) - - def synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None, logger=None): - if logger is None: - print_func = print - else: - print_func = logger.info - - i = 0 - if not header: - header = "" - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt="{avg:.4f}") - data_time = SmoothedValue(fmt="{avg:.4f}") - space_fmt = ":" + str(len(str(len(iterable)))) + "d" - if torch.cuda.is_available(): - log_msg = self.delimiter.join( - [ - header, - "[{0" + space_fmt + "}/{1}]", - "eta: {eta}", - "{meters}", - "time: {time}", - "data: {data}", - "max mem: {memory:.0f}", - ] - ) - else: - log_msg = self.delimiter.join( - [ - header, - "[{0" + space_fmt + "}/{1}]", - "eta: {eta}", - "{meters}", - "time: {time}", - "data: {data}", - ] - ) - MB = 1024.0 * 1024.0 - for obj in iterable: - 
data_time.update(time.time() - end) - yield obj - # import ipdb; ipdb.set_trace() - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print_func( - log_msg.format( - i, - len(iterable), - eta=eta_string, - meters=str(self), - time=str(iter_time), - data=str(data_time), - memory=torch.cuda.max_memory_allocated() / MB, - ) - ) - else: - print_func( - log_msg.format( - i, - len(iterable), - eta=eta_string, - meters=str(self), - time=str(iter_time), - data=str(data_time), - ) - ) - i += 1 - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print_func( - "{} Total time: {} ({:.4f} s / it)".format( - header, total_time_str, total_time / len(iterable) - ) - ) - - -def get_sha(): - cwd = os.path.dirname(os.path.abspath(__file__)) - - def _run(command): - return subprocess.check_output(command, cwd=cwd).decode("ascii").strip() - - sha = "N/A" - diff = "clean" - branch = "N/A" - try: - sha = _run(["git", "rev-parse", "HEAD"]) - subprocess.check_output(["git", "diff"], cwd=cwd) - diff = _run(["git", "diff-index", "HEAD"]) - diff = "has uncommited changes" if diff else "clean" - branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"]) - except Exception: - pass - message = f"sha: {sha}, status: {diff}, branch: {branch}" - return message - - -def collate_fn(batch): - # import ipdb; ipdb.set_trace() - batch = list(zip(*batch)) - batch[0] = nested_tensor_from_tensor_list(batch[0]) - return tuple(batch) - - -def _max_by_axis(the_list): - # type: (List[List[int]]) -> List[int] - maxes = the_list[0] - for sublist in the_list[1:]: - for index, item in enumerate(sublist): - maxes[index] = max(maxes[index], item) - return maxes - - -class NestedTensor(object): - def __init__(self, tensors, mask: Optional[Tensor]): - self.tensors = tensors - self.mask = mask - if mask == "auto": - self.mask = torch.zeros_like(tensors).to(tensors.device) - if self.mask.dim() == 3: - self.mask = self.mask.sum(0).to(bool) - elif self.mask.dim() == 4: - self.mask = self.mask.sum(1).to(bool) - else: - raise ValueError( - "tensors dim must be 3 or 4 but {}({})".format( - self.tensors.dim(), self.tensors.shape - ) - ) - - def imgsize(self): - res = [] - for i in range(self.tensors.shape[0]): - mask = self.mask[i] - maxH = (~mask).sum(0).max() - maxW = (~mask).sum(1).max() - res.append(torch.Tensor([maxH, maxW])) - return res - - def to(self, device): - # type: (Device) -> NestedTensor # noqa - cast_tensor = self.tensors.to(device) - mask = self.mask - if mask is not None: - assert mask is not None - cast_mask = mask.to(device) - else: - cast_mask = None - return NestedTensor(cast_tensor, cast_mask) - - def to_img_list_single(self, tensor, mask): - assert tensor.dim() == 3, "dim of tensor should be 3 but {}".format(tensor.dim()) - maxH = (~mask).sum(0).max() - maxW = (~mask).sum(1).max() - img = tensor[:, :maxH, :maxW] - return img - - def to_img_list(self): - """remove the padding and convert to img list - - Returns: - [type]: [description] - """ - if self.tensors.dim() == 3: - return self.to_img_list_single(self.tensors, self.mask) - else: - res = [] - for i in range(self.tensors.shape[0]): - tensor_i = self.tensors[i] - mask_i = self.mask[i] - res.append(self.to_img_list_single(tensor_i, mask_i)) - return res - - @property - def device(self): - return 
self.tensors.device - - def decompose(self): - return self.tensors, self.mask - - def __repr__(self): - return str(self.tensors) - - @property - def shape(self): - return {"tensors.shape": self.tensors.shape, "mask.shape": self.mask.shape} - - -def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): - # TODO make this more general - if tensor_list[0].ndim == 3: - if torchvision._is_tracing(): - # nested_tensor_from_tensor_list() does not export well to ONNX - # call _onnx_nested_tensor_from_tensor_list() instead - return _onnx_nested_tensor_from_tensor_list(tensor_list) - - # TODO make it support different-sized images - max_size = _max_by_axis([list(img.shape) for img in tensor_list]) - # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) - batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape - dtype = tensor_list[0].dtype - device = tensor_list[0].device - tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, h, w), dtype=torch.bool, device=device) - for img, pad_img, m in zip(tensor_list, tensor, mask): - pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - m[: img.shape[1], : img.shape[2]] = False - else: - raise ValueError("not supported") - return NestedTensor(tensor, mask) - - -# _onnx_nested_tensor_from_tensor_list() is an implementation of -# nested_tensor_from_tensor_list() that is supported by ONNX tracing. -@torch.jit.unused -def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: - max_size = [] - for i in range(tensor_list[0].dim()): - max_size_i = torch.max( - torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32) - ).to(torch.int64) - max_size.append(max_size_i) - max_size = tuple(max_size) - - # work around for - # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - # m[: img.shape[1], :img.shape[2]] = False - # which is not yet supported in onnx - padded_imgs = [] - padded_masks = [] - for img in tensor_list: - padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] - padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) - padded_imgs.append(padded_img) - - m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) - padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) - padded_masks.append(padded_mask.to(torch.bool)) - - tensor = torch.stack(padded_imgs) - mask = torch.stack(padded_masks) - - return NestedTensor(tensor, mask=mask) - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop("force", False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - if "WORLD_SIZE" in os.environ and os.environ["WORLD_SIZE"] != "": # 'RANK' in os.environ and - args.rank = 
int(os.environ["RANK"]) - args.world_size = int(os.environ["WORLD_SIZE"]) - args.gpu = args.local_rank = int(os.environ["LOCAL_RANK"]) - - # launch by torch.distributed.launch - # Single node - # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ... - # Multi nodes - # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 0 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ... - # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 1 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ... - # args.rank = int(os.environ.get('OMPI_COMM_WORLD_RANK')) - # local_world_size = int(os.environ['GPU_PER_NODE_COUNT']) - # args.world_size = args.world_size * local_world_size - # args.gpu = args.local_rank = int(os.environ['LOCAL_RANK']) - # args.rank = args.rank * local_world_size + args.local_rank - print( - "world size: {}, rank: {}, local rank: {}".format( - args.world_size, args.rank, args.local_rank - ) - ) - print(json.dumps(dict(os.environ), indent=2)) - elif "SLURM_PROCID" in os.environ: - args.rank = int(os.environ["SLURM_PROCID"]) - args.gpu = args.local_rank = int(os.environ["SLURM_LOCALID"]) - args.world_size = int(os.environ["SLURM_NPROCS"]) - - print( - "world size: {}, world rank: {}, local rank: {}, device_count: {}".format( - args.world_size, args.rank, args.local_rank, torch.cuda.device_count() - ) - ) - else: - print("Not using distributed mode") - args.distributed = False - args.world_size = 1 - args.rank = 0 - args.local_rank = 0 - return - - print("world_size:{} rank:{} local_rank:{}".format(args.world_size, args.rank, args.local_rank)) - args.distributed = True - torch.cuda.set_device(args.local_rank) - args.dist_backend = "nccl" - print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True) - - torch.distributed.init_process_group( - backend=args.dist_backend, - world_size=args.world_size, - rank=args.rank, - init_method=args.dist_url, - ) - - print("Before torch.distributed.barrier()") - torch.distributed.barrier() - print("End torch.distributed.barrier()") - setup_for_distributed(args.rank == 0) - - -@torch.no_grad() -def accuracy(output, target, topk=(1,)): - """Computes the precision@k for the specified values of k""" - if target.numel() == 0: - return [torch.zeros([], device=output.device)] - maxk = max(topk) - batch_size = target.size(0) - - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - - res = [] - for k in topk: - correct_k = correct[:k].view(-1).float().sum(0) - res.append(correct_k.mul_(100.0 / batch_size)) - return res - - -@torch.no_grad() -def accuracy_onehot(pred, gt): - """_summary_ - - Args: - pred (_type_): n, c - gt (_type_): n, c - """ - tp = ((pred - gt).abs().sum(-1) < 1e-4).float().sum() - acc = tp / gt.shape[0] * 100 - return acc - - -def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): - # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor - """ - Equivalent to nn.functional.interpolate, but with support for empty batch sizes. - This will eventually be supported natively by PyTorch, and this - class can go away. 
- """ - if __torchvision_need_compat_flag < 0.7: - if input.numel() > 0: - return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners) - - output_shape = _output_size(2, input, size, scale_factor) - output_shape = list(input.shape[:-2]) + list(output_shape) - return _new_empty_tensor(input, output_shape) - else: - return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) - - -class color_sys: - def __init__(self, num_colors) -> None: - self.num_colors = num_colors - colors = [] - for i in np.arange(0.0, 360.0, 360.0 / num_colors): - hue = i / 360.0 - lightness = (50 + np.random.rand() * 10) / 100.0 - saturation = (90 + np.random.rand() * 10) / 100.0 - colors.append( - tuple([int(j * 255) for j in colorsys.hls_to_rgb(hue, lightness, saturation)]) - ) - self.colors = colors - - def __call__(self, idx): - return self.colors[idx] - - -def inverse_sigmoid(x, eps=1e-3): - x = x.clamp(min=0, max=1) - x1 = x.clamp(min=eps) - x2 = (1 - x).clamp(min=eps) - return torch.log(x1 / x2) - - -def clean_state_dict(state_dict): - new_state_dict = OrderedDict() - for k, v in state_dict.items(): - if k[:7] == "module.": - k = k[7:] # remove `module.` - new_state_dict[k] = v - return new_state_dict diff --git a/spaces/joaogabriellima/Real-Time-Voice-Cloning/README.md b/spaces/joaogabriellima/Real-Time-Voice-Cloning/README.md deleted file mode 100644 index 753600359817925a36e07b3b5c944567a2f0d946..0000000000000000000000000000000000000000 --- a/spaces/joaogabriellima/Real-Time-Voice-Cloning/README.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Real Time Voice Cloning -emoji: 📈 -colorFrom: blue -colorTo: red -sdk: gradio -app_file: app.py -sdk_version: 3.17.1 -pinned: false -duplicated_from: akhaliq/Real-Time-Voice-Cloning ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/joaogabriellima/Real-Time-Voice-Cloning/vocoder/display.py b/spaces/joaogabriellima/Real-Time-Voice-Cloning/vocoder/display.py deleted file mode 100644 index 956880722a3f05613ebd06f5686b3d8a59642e92..0000000000000000000000000000000000000000 --- a/spaces/joaogabriellima/Real-Time-Voice-Cloning/vocoder/display.py +++ /dev/null @@ -1,120 +0,0 @@ -import matplotlib.pyplot as plt -import time -import numpy as np -import sys - - -def progbar(i, n, size=16): - done = (i * size) // n - bar = '' - for i in range(size): - bar += '█' if i <= done else '░' - return bar - - -def stream(message) : - try: - sys.stdout.write("\r{%s}" % message) - except: - #Remove non-ASCII characters from message - message = ''.join(i for i in message if ord(i)<128) - sys.stdout.write("\r{%s}" % message) - - -def simple_table(item_tuples) : - - border_pattern = '+---------------------------------------' - whitespace = ' ' - - headings, cells, = [], [] - - for item in item_tuples : - - heading, cell = str(item[0]), str(item[1]) - - pad_head = True if len(heading) < len(cell) else False - - pad = abs(len(heading) - len(cell)) - pad = whitespace[:pad] - - pad_left = pad[:len(pad)//2] - pad_right = pad[len(pad)//2:] - - if pad_head : - heading = pad_left + heading + pad_right - else : - cell = pad_left + cell + pad_right - - headings += [heading] - cells += [cell] - - border, head, body = '', '', '' - - for i in range(len(item_tuples)) : - - temp_head = f'| {headings[i]} ' - temp_body = f'| {cells[i]} ' - - border += border_pattern[:len(temp_head)] - head += temp_head - body += temp_body - - if i == len(item_tuples) - 1 : - head += '|' - body += '|' - border += '+' - - print(border) - print(head) - print(border) - print(body) - print(border) - print(' ') - - -def time_since(started) : - elapsed = time.time() - started - m = int(elapsed // 60) - s = int(elapsed % 60) - if m >= 60 : - h = int(m // 60) - m = m % 60 - return f'{h}h {m}m {s}s' - else : - return f'{m}m {s}s' - - -def save_attention(attn, path) : - fig = plt.figure(figsize=(12, 6)) - plt.imshow(attn.T, interpolation='nearest', aspect='auto') - fig.savefig(f'{path}.png', bbox_inches='tight') - plt.close(fig) - - -def save_spectrogram(M, path, length=None) : - M = np.flip(M, axis=0) - if length : M = M[:, :length] - fig = plt.figure(figsize=(12, 6)) - plt.imshow(M, interpolation='nearest', aspect='auto') - fig.savefig(f'{path}.png', bbox_inches='tight') - plt.close(fig) - - -def plot(array) : - fig = plt.figure(figsize=(30, 5)) - ax = fig.add_subplot(111) - ax.xaxis.label.set_color('grey') - ax.yaxis.label.set_color('grey') - ax.xaxis.label.set_fontsize(23) - ax.yaxis.label.set_fontsize(23) - ax.tick_params(axis='x', colors='grey', labelsize=23) - ax.tick_params(axis='y', colors='grey', labelsize=23) - plt.plot(array) - - -def plot_spec(M) : - M = np.flip(M, axis=0) - plt.figure(figsize=(18,4)) - plt.imshow(M, interpolation='nearest', aspect='auto') - plt.show() - diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/quic/_common.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/quic/_common.py deleted file mode 100644 index 38ec103ff8c04b0fa6da4b5e67c56f4fec989b11..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/quic/_common.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -import socket -import struct -import time -from typing import Any, Optional - 
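A small sketch of the progress helpers from `vocoder/display.py` above (assuming `progbar` and `time_since` are importable):

```python
import time

# progbar renders a fixed-width text bar; time_since pretty-prints elapsed time.
print(progbar(25, 100, size=16))       # '█████░░░░░░░░░░░'
print(time_since(time.time() - 125))   # '2m 5s'
```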
-import aioquic.quic.configuration # type: ignore -import aioquic.quic.connection # type: ignore - -import dns.inet - -QUIC_MAX_DATAGRAM = 2048 - - -class UnexpectedEOF(Exception): - pass - - -class Buffer: - def __init__(self): - self._buffer = b"" - self._seen_end = False - - def put(self, data, is_end): - if self._seen_end: - return - self._buffer += data - if is_end: - self._seen_end = True - - def have(self, amount): - if len(self._buffer) >= amount: - return True - if self._seen_end: - raise UnexpectedEOF - return False - - def seen_end(self): - return self._seen_end - - def get(self, amount): - assert self.have(amount) - data = self._buffer[:amount] - self._buffer = self._buffer[amount:] - return data - - -class BaseQuicStream: - def __init__(self, connection, stream_id): - self._connection = connection - self._stream_id = stream_id - self._buffer = Buffer() - self._expecting = 0 - - def id(self): - return self._stream_id - - def _expiration_from_timeout(self, timeout): - if timeout is not None: - expiration = time.time() + timeout - else: - expiration = None - return expiration - - def _timeout_from_expiration(self, expiration): - if expiration is not None: - timeout = max(expiration - time.time(), 0.0) - else: - timeout = None - return timeout - - # Subclass must implement receive() as sync / async and which returns a message - # or raises UnexpectedEOF. - - def _encapsulate(self, datagram): - l = len(datagram) - return struct.pack("!H", l) + datagram - - def _common_add_input(self, data, is_end): - self._buffer.put(data, is_end) - return self._expecting > 0 and self._buffer.have(self._expecting) - - def _close(self): - self._connection.close_stream(self._stream_id) - self._buffer.put(b"", True) # send EOF in case we haven't seen it. - - -class BaseQuicConnection: - def __init__( - self, connection, address, port, source=None, source_port=0, manager=None - ): - self._done = False - self._connection = connection - self._address = address - self._port = port - self._closed = False - self._manager = manager - self._streams = {} - self._af = dns.inet.af_for_address(address) - self._peer = dns.inet.low_level_address_tuple((address, port)) - if source is None and source_port != 0: - if self._af == socket.AF_INET: - source = "0.0.0.0" - elif self._af == socket.AF_INET6: - source = "::" - else: - raise NotImplementedError - if source: - self._source = (source, source_port) - else: - self._source = None - - def close_stream(self, stream_id): - del self._streams[stream_id] - - def _get_timer_values(self, closed_is_special=True): - now = time.time() - expiration = self._connection.get_timer() - if expiration is None: - expiration = now + 3600 # arbitrary "big" value - interval = max(expiration - now, 0) - if self._closed and closed_is_special: - # lower sleep interval to avoid a race in the closing process - # which can lead to higher latency closing due to sleeping when - # we have events. 
- interval = min(interval, 0.05) - return (expiration, interval) - - def _handle_timer(self, expiration): - now = time.time() - if expiration <= now: - self._connection.handle_timer(now) - - -class AsyncQuicConnection(BaseQuicConnection): - async def make_stream(self, timeout: Optional[float] = None) -> Any: - pass - - -class BaseQuicManager: - def __init__(self, conf, verify_mode, connection_factory, server_name=None): - self._connections = {} - self._connection_factory = connection_factory - if conf is None: - verify_path = None - if isinstance(verify_mode, str): - verify_path = verify_mode - verify_mode = True - conf = aioquic.quic.configuration.QuicConfiguration( - alpn_protocols=["doq", "doq-i03"], - verify_mode=verify_mode, - server_name=server_name, - ) - if verify_path is not None: - conf.load_verify_locations(verify_path) - self._conf = conf - - def _connect(self, address, port=853, source=None, source_port=0): - connection = self._connections.get((address, port)) - if connection is not None: - return (connection, False) - qconn = aioquic.quic.connection.QuicConnection(configuration=self._conf) - qconn.connect(address, time.time()) - connection = self._connection_factory( - qconn, address, port, source, source_port, self - ) - self._connections[(address, port)] = connection - return (connection, True) - - def closed(self, address, port): - try: - del self._connections[(address, port)] - except KeyError: - pass - - -class AsyncQuicManager(BaseQuicManager): - def connect(self, address, port=853, source=None, source_port=0): - raise NotImplementedError diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/audio.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/audio.py deleted file mode 100644 index 22436d1c6209aa7db1838a70688cc41705adc40f..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/audio.py +++ /dev/null @@ -1,395 +0,0 @@ -"""gr.Audio() component.""" - -from __future__ import annotations - -import tempfile -import warnings -from pathlib import Path -from typing import Any, Callable, Literal - -import numpy as np -import requests -from gradio_client import media_data -from gradio_client import utils as client_utils -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import FileSerializable - -from gradio import processing_utils, utils -from gradio.components.base import IOComponent, _Keywords -from gradio.events import ( - Changeable, - Clearable, - Playable, - Recordable, - Streamable, - StreamableOutput, - Uploadable, -) -from gradio.interpretation import TokenInterpretable - -set_documentation_group("component") - - -@document() -class Audio( - Changeable, - Clearable, - Playable, - Recordable, - Streamable, - StreamableOutput, - Uploadable, - IOComponent, - FileSerializable, - TokenInterpretable, -): - """ - Creates an audio component that can be used to upload/record audio (as an input) or display audio (as an output). - Preprocessing: passes the uploaded audio as a {Tuple(int, numpy.array)} corresponding to (sample rate in Hz, audio data as a 16-bit int array whose values range from -32768 to 32767), or as a {str} filepath, depending on `type`. 
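For the Audio component whose docstring begins above, a minimal Gradio usage sketch with `type="numpy"`; `reverse_audio` is a hypothetical function name used only for illustration:

```python
import gradio as gr
import numpy as np

# With type="numpy" the wrapped function receives (sample_rate, data) and
# may return the same kind of tuple.
def reverse_audio(audio):
    sample_rate, data = audio
    return sample_rate, np.flipud(data)

demo = gr.Interface(fn=reverse_audio, inputs=gr.Audio(type="numpy"), outputs=gr.Audio())
# demo.launch()
```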
- Postprocessing: expects a {Tuple(int, numpy.array)} corresponding to (sample rate in Hz, audio data as a float or int numpy array) or as a {str} or {pathlib.Path} filepath or URL to an audio file, or bytes for binary content (recommended for streaming) - Examples-format: a {str} filepath to a local file that contains audio. - Demos: main_note, generate_tone, reverse_audio - Guides: real-time-speech-recognition - """ - - def __init__( - self, - value: str | Path | tuple[int, np.ndarray] | Callable | None = None, - *, - source: Literal["upload", "microphone"] | None = None, - type: Literal["numpy", "filepath"] = "numpy", - label: str | None = None, - every: float | None = None, - show_label: bool | None = None, - container: bool = True, - scale: int | None = None, - min_width: int = 160, - interactive: bool | None = None, - visible: bool = True, - streaming: bool = False, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - format: Literal["wav", "mp3"] = "wav", - autoplay: bool = False, - show_download_button=True, - show_share_button: bool | None = None, - show_edit_button: bool | None = True, - **kwargs, - ): - """ - Parameters: - value: A path, URL, or [sample_rate, numpy array] tuple (sample rate in Hz, audio data as a float or int numpy array) for the default value that Audio component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component. - source: Source of audio. "upload" creates a box where user can drop an audio file, "microphone" creates a microphone input. - type: The format the audio file is converted to before being passed into the prediction function. "numpy" converts the audio to a tuple consisting of: (int sample rate, numpy.array for the data), "filepath" passes a str path to a temporary file containing the audio. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - container: If True, will place the component in a container - providing some extra padding around the border. - scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - interactive: if True, will allow users to upload and edit a audio file; if False, can only be used to play audio. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - streaming: If set to True when used in a `live` interface as an input, will automatically stream webcam feed. When used set as an output, takes audio chunks yield from the backend and combines them into one streaming audio output. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - format: The file format to save audio files. 
Either 'wav' or 'mp3'. wav files are lossless but will tend to be larger files. mp3 files tend to be smaller. Default is wav. Applies both when this component is used as an input (when `type` is "format") and when this component is used as an output. - autoplay: Whether to automatically play the audio when the component is used as an output. Note: browsers will not autoplay audio files if the user has not interacted with the page yet. - show_download_button: If True, will show a download button in the corner of the component for saving audio. If False, icon does not appear. - show_share_button: If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise. - show_edit_button: If True, will show an edit icon in the corner of the component that allows user to edit the audio. If False, icon does not appear. Default is True. - """ - valid_sources = ["upload", "microphone"] - source = source if source else ("microphone" if streaming else "upload") - if source not in valid_sources: - raise ValueError( - f"Invalid value for parameter `source`: {source}. Please choose from one of: {valid_sources}" - ) - self.source = source - valid_types = ["numpy", "filepath"] - if type not in valid_types: - raise ValueError( - f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" - ) - self.type = type - self.streaming = streaming - if streaming and source == "upload": - raise ValueError( - "Audio streaming only available if source is 'microphone'." - ) - self.format = format - self.autoplay = autoplay - self.show_download_button = show_download_button - self.show_share_button = ( - (utils.get_space() is not None) - if show_share_button is None - else show_share_button - ) - self.show_edit_button = show_edit_button - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - container=container, - scale=scale, - min_width=min_width, - interactive=interactive, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - value=value, - **kwargs, - ) - TokenInterpretable.__init__(self) - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": {"is_file": False, "data": media_data.BASE64_AUDIO}, - "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav", - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - source: Literal["upload", "microphone"] | None = None, - label: str | None = None, - show_label: bool | None = None, - container: bool | None = None, - scale: int | None = None, - min_width: int | None = None, - interactive: bool | None = None, - visible: bool | None = None, - autoplay: bool | None = None, - show_download_button: bool | None = None, - show_share_button: bool | None = None, - show_edit_button: bool | None = None, - ): - warnings.warn( - "Using the update method is deprecated. Simply return a new object instead, e.g. `return gr.Audio(...)` instead of `return gr.Audio.update(...)`." 
- ) - return { - "source": source, - "label": label, - "show_label": show_label, - "container": container, - "scale": scale, - "min_width": min_width, - "interactive": interactive, - "visible": visible, - "value": value, - "autoplay": autoplay, - "show_download_button": show_download_button, - "show_share_button": show_share_button, - "show_edit_button": show_edit_button, - "__type__": "update", - } - - def preprocess( - self, x: dict[str, Any] | None - ) -> tuple[int, np.ndarray] | str | None: - """ - Parameters: - x: dictionary with keys "name", "data", "is_file", "crop_min", "crop_max". - Returns: - audio in requested format - """ - if x is None: - return x - file_name, file_data, is_file = ( - x["name"], - x["data"], - x.get("is_file", False), - ) - crop_min, crop_max = x.get("crop_min", 0), x.get("crop_max", 100) - if is_file: - if client_utils.is_http_url_like(file_name): - temp_file_path = self.download_temp_copy_if_needed(file_name) - else: - temp_file_path = self.make_temp_copy_if_needed(file_name) - else: - temp_file_path = self.base64_to_temp_file_if_needed(file_data, file_name) - - sample_rate, data = processing_utils.audio_from_file( - temp_file_path, crop_min=crop_min, crop_max=crop_max - ) - - # Need a unique name for the file to avoid re-using the same audio file if - # a user submits the same audio file twice, but with different crop min/max. - temp_file_path = Path(temp_file_path) - output_file_name = str( - temp_file_path.with_name( - f"{temp_file_path.stem}-{crop_min}-{crop_max}{temp_file_path.suffix}" - ) - ) - - if self.type == "numpy": - return sample_rate, data - elif self.type == "filepath": - output_file = str(Path(output_file_name).with_suffix(f".{self.format}")) - processing_utils.audio_to_file( - sample_rate, data, output_file, format=self.format - ) - return output_file - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'numpy', 'filepath'." - ) - - def set_interpret_parameters(self, segments: int = 8): - """ - Calculates interpretation score of audio subsections by splitting the audio into subsections, then using a "leave one out" method to calculate the score of each subsection by removing the subsection and measuring the delta of the output value. - Parameters: - segments: Number of interpretation segments to split audio into. 
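From the `preprocess` implementation above, the expected payload keys can be read off directly; a sketch of such a payload (the path is hypothetical, and the commented call shows the result for `type="numpy"`):

```python
# Payload shape read from preprocess() above; "is_file" skips base64 decoding.
payload = {
    "name": "/tmp/example.wav",  # hypothetical local file
    "data": None,                # base64 string when is_file is False
    "is_file": True,
    "crop_min": 0,               # percent of the clip to crop from the start
    "crop_max": 100,             # percent of the clip to keep up to
}
# audio = gr.Audio(type="numpy")
# sample_rate, data = audio.preprocess(payload)
```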
- """ - self.interpretation_segments = segments - return self - - def tokenize(self, x): - if x.get("is_file"): - sample_rate, data = processing_utils.audio_from_file(x["name"]) - else: - file_name = self.base64_to_temp_file_if_needed(x["data"]) - sample_rate, data = processing_utils.audio_from_file(file_name) - leave_one_out_sets = [] - tokens = [] - masks = [] - duration = data.shape[0] - boundaries = np.linspace(0, duration, self.interpretation_segments + 1).tolist() - boundaries = [round(boundary) for boundary in boundaries] - for index in range(len(boundaries) - 1): - start, stop = boundaries[index], boundaries[index + 1] - masks.append((start, stop)) - - # Handle the leave one outs - leave_one_out_data = np.copy(data) - leave_one_out_data[start:stop] = 0 - file = tempfile.NamedTemporaryFile( - delete=False, suffix=".wav", dir=self.DEFAULT_TEMP_DIR - ) - processing_utils.audio_to_file(sample_rate, leave_one_out_data, file.name) - out_data = client_utils.encode_file_to_base64(file.name) - leave_one_out_sets.append(out_data) - file.close() - Path(file.name).unlink() - - # Handle the tokens - token = np.copy(data) - token[0:start] = 0 - token[stop:] = 0 - file = tempfile.NamedTemporaryFile( - delete=False, suffix=".wav", dir=self.DEFAULT_TEMP_DIR - ) - processing_utils.audio_to_file(sample_rate, token, file.name) - token_data = client_utils.encode_file_to_base64(file.name) - file.close() - Path(file.name).unlink() - - tokens.append(token_data) - tokens = [{"name": "token.wav", "data": token} for token in tokens] - leave_one_out_sets = [ - {"name": "loo.wav", "data": loo_set} for loo_set in leave_one_out_sets - ] - return tokens, leave_one_out_sets, masks - - def get_masked_inputs(self, tokens, binary_mask_matrix): - # create a "zero input" vector and get sample rate - x = tokens[0]["data"] - file_name = self.base64_to_temp_file_if_needed(x) - sample_rate, data = processing_utils.audio_from_file(file_name) - zero_input = np.zeros_like(data, dtype="int16") - # decode all of the tokens - token_data = [] - for token in tokens: - file_name = self.base64_to_temp_file_if_needed(token["data"]) - _, data = processing_utils.audio_from_file(file_name) - token_data.append(data) - # construct the masked version - masked_inputs = [] - for binary_mask_vector in binary_mask_matrix: - masked_input = np.copy(zero_input) - for t, b in zip(token_data, binary_mask_vector): - masked_input = masked_input + t * int(b) - file = tempfile.NamedTemporaryFile(delete=False, dir=self.DEFAULT_TEMP_DIR) - processing_utils.audio_to_file(sample_rate, masked_input, file.name) - masked_data = client_utils.encode_file_to_base64(file.name) - file.close() - Path(file.name).unlink() - masked_inputs.append(masked_data) - return masked_inputs - - def postprocess( - self, y: tuple[int, np.ndarray] | str | Path | bytes | None - ) -> str | dict | bytes | None: - """ - Parameters: - y: audio data in either of the following formats: a tuple of (sample_rate, data), or a string filepath or URL to an audio file, or None. 
- Returns: - base64 url data - """ - if y is None: - return None - if isinstance(y, bytes): - if self.streaming: - return y - file_path = self.file_bytes_to_file(y, "audio") - elif isinstance(y, str) and client_utils.is_http_url_like(y): - return {"name": y, "data": None, "is_file": True} - elif isinstance(y, tuple): - sample_rate, data = y - file_path = self.audio_to_temp_file( - data, - sample_rate, - format=self.format, - ) - self.temp_files.add(file_path) - else: - file_path = self.make_temp_copy_if_needed(y) - return { - "name": file_path, - "data": None, - "is_file": True, - "orig_name": Path(file_path).name, - } - - def stream_output(self, y, output_id: str, first_chunk: bool): - output_file = { - "name": output_id, - "is_stream": True, - "is_file": False, - } - if y is None: - return None, output_file - if isinstance(y, bytes): - return y, output_file - if client_utils.is_http_url_like(y["name"]): - response = requests.get(y["name"]) - binary_data = response.content - else: - output_file["orig_name"] = y["orig_name"] - file_path = y["name"] - is_wav = file_path.endswith(".wav") - with open(file_path, "rb") as f: - binary_data = f.read() - if is_wav: - # strip length information from first chunk header, remove headers entirely from subsequent chunks - if first_chunk: - binary_data = ( - binary_data[:4] + b"\xFF\xFF\xFF\xFF" + binary_data[8:] - ) - binary_data = ( - binary_data[:40] + b"\xFF\xFF\xFF\xFF" + binary_data[44:] - ) - else: - binary_data = binary_data[44:] - return binary_data, output_file - - def check_streamable(self): - if self.source != "microphone": - raise ValueError( - "Audio streaming only available if source is 'microphone'." - ) - - def as_example(self, input_data: str | None) -> str: - return Path(input_data).name if input_data else "" diff --git a/spaces/joheras/glove-relations/app.py b/spaces/joheras/glove-relations/app.py deleted file mode 100644 index ad3e2128de470965c7536b57e487e646e4342992..0000000000000000000000000000000000000000 --- a/spaces/joheras/glove-relations/app.py +++ /dev/null @@ -1,11 +0,0 @@ -import gradio as gr -import pandas as pd -import gensim.downloader as api -wv = api.load('glove-wiki-gigaword-50') - - -def get_associations(term,top_words=10): - return pd.DataFrame(wv.most_similar(positive=[term], topn=10),columns=['word','similarity']) - - -iface = gr.Interface(fn=get_associations, inputs="text", outputs=gr.Dataframe(headers=['word','similarity']),examples=["cat"]).launch(share=False) diff --git a/spaces/jordonpeter01/MusicGen2/audiocraft/data/zip.py b/spaces/jordonpeter01/MusicGen2/audiocraft/data/zip.py deleted file mode 100644 index 1f1154231da321dd38d151ff285dbcff5e38a6e0..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen2/audiocraft/data/zip.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing -import zipfile - -from dataclasses import dataclass -from functools import lru_cache -from typing_extensions import Literal - - -DEFAULT_SIZE = 32 -MODE = Literal['r', 'w', 'x', 'a'] - - -@dataclass(order=True) -class PathInZip: - """Class for holding a path of file within a zip file. 
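The glove-relations Space above reduces to a single gensim call; a standalone sketch follows (the `glove-wiki-gigaword-50` download, roughly 66 MB, happens once on first use, and the printed neighbors are illustrative). Note the Space's `get_associations` accepts a `top_words` argument but hard-codes `topn=10` in the call.

```python
import gensim.downloader as api

wv = api.load('glove-wiki-gigaword-50')
print(wv.most_similar(positive=['cat'], topn=3))
# e.g. [('dog', ...), ('rabbit', ...), ('cats', ...)] -- scores are illustrative
```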
- - Args: - path: The convention is : - Let's assume there is a zip file /some/location/foo.zip - and inside of it is a json file located at /data/file1.json, - Then we expect path = "/some/location/foo.zip:/data/file1.json" - """ - - INFO_PATH_SEP = ':' - zip_path: str - file_path: str - - def __init__(self, path: str) -> None: - split_path = path.split(self.INFO_PATH_SEP) - assert len(split_path) == 2 - self.zip_path, self.file_path = split_path - - @classmethod - def from_paths(cls, zip_path: str, file_path: str): - return cls(zip_path + cls.INFO_PATH_SEP + file_path) - - def __str__(self) -> str: - return self.zip_path + self.INFO_PATH_SEP + self.file_path - - -def _open_zip(path: str, mode: MODE = 'r'): - return zipfile.ZipFile(path, mode) - - -_cached_open_zip = lru_cache(DEFAULT_SIZE)(_open_zip) - - -def set_zip_cache_size(max_size: int): - """Sets the maximal LRU caching for zip file opening. - - Args: - max_size: the maximal LRU cache. - """ - global _cached_open_zip - _cached_open_zip = lru_cache(max_size)(_open_zip) - - -def open_file_in_zip(path_in_zip: PathInZip, mode: str = 'r') -> typing.IO: - """Opens a file stored inside a zip and returns a file-like object. - - Args: - path_in_zip: A PathInZip object representing the file to return a file-like object of. - mode: The mode in which to open the file with. - Returns: - A file-like object for PathInZip. - """ - zf = _cached_open_zip(path_in_zip.zip_path) - return zf.open(path_in_zip.file_path) diff --git a/spaces/jskalbg/ChatDev01/camel/prompts/__init__.py b/spaces/jskalbg/ChatDev01/camel/prompts/__init__.py deleted file mode 100644 index 89674eca618c2ea92918b9762ab5f6a413ad3972..0000000000000000000000000000000000000000 --- a/spaces/jskalbg/ChatDev01/camel/prompts/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== -from .base import TextPrompt, CodePrompt, TextPromptDict -from .ai_society import AISocietyPromptTemplateDict -# from .chat_dev import ChatDevPromptTemplateDict -from .code import CodePromptTemplateDict -from .misalignment import MisalignmentPromptTemplateDict -from .translation import TranslationPromptTemplateDict -from .solution_extraction import SolutionExtractionPromptTemplateDict -from .evaluation import EvaluationPromptTemplateDict -from .task_prompt_template import TaskPromptTemplateDict -from .prompt_templates import PromptTemplateGenerator - -__all__ = [ - 'TextPrompt', - 'CodePrompt', - 'TextPromptDict', - 'AISocietyPromptTemplateDict', - 'CodePromptTemplateDict', - 'MisalignmentPromptTemplateDict', - 'TranslationPromptTemplateDict', - 'EvaluationPromptTemplateDict', - 'TaskPromptTemplateDict', - 'PromptTemplateGenerator', - 'SolutionExtractionPromptTemplateDict', -] diff --git a/spaces/kamezawash/rembg/rembg/session_cloth.py b/spaces/kamezawash/rembg/rembg/session_cloth.py deleted file mode 100644 index 11bcef74378be4d64058772c29ac45240f60a85b..0000000000000000000000000000000000000000 --- a/spaces/kamezawash/rembg/rembg/session_cloth.py +++ /dev/null @@ -1,88 +0,0 @@ -from typing import List - -import numpy as np -from PIL import Image -from PIL.Image import Image as PILImage -from scipy.special import log_softmax - -from .session_base import BaseSession - -pallete1 = [ - 0, - 0, - 0, - 255, - 255, - 255, - 0, - 0, - 0, - 0, - 0, - 0, -] - -pallete2 = [ - 0, - 0, - 0, - 0, - 0, - 0, - 255, - 255, - 255, - 0, - 0, - 0, -] - -pallete3 = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 255, - 255, - 255, -] - - -class ClothSession(BaseSession): - def predict(self, img: PILImage) -> List[PILImage]: - ort_outs = self.inner_session.run( - None, self.normalize(img, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), (768, 768)) - ) - - pred = ort_outs - pred = log_softmax(pred[0], 1) - pred = np.argmax(pred, axis=1, keepdims=True) - pred = np.squeeze(pred, 0) - pred = np.squeeze(pred, 0) - - mask = Image.fromarray(pred.astype("uint8"), mode="L") - mask = mask.resize(img.size, Image.LANCZOS) - - masks = [] - - mask1 = mask.copy() - mask1.putpalette(pallete1) - mask1 = mask1.convert("RGB").convert("L") - masks.append(mask1) - - mask2 = mask.copy() - mask2.putpalette(pallete2) - mask2 = mask2.convert("RGB").convert("L") - masks.append(mask2) - - mask3 = mask.copy() - mask3.putpalette(pallete3) - mask3 = mask3.convert("RGB").convert("L") - masks.append(mask3) - - return masks diff --git a/spaces/katielink/spleen_segmentation/app.py b/spaces/katielink/spleen_segmentation/app.py deleted file mode 100644 index 5f3b4cb2443d2bb6f08190bb564b0b729ef6d476..0000000000000000000000000000000000000000 --- a/spaces/katielink/spleen_segmentation/app.py +++ /dev/null @@ -1,80 +0,0 @@ -import os -import gradio as gr -import torch -from monai import bundle - -# Set the bundle name and download path -BUNDLE_NAME = 'spleen_ct_segmentation_v0.1.0' -BUNDLE_PATH = os.path.join(torch.hub.get_dir(), 'bundle', BUNDLE_NAME) - -description = """ -## 🚀 To run -Upload a abdominal CT scan, or try one of the examples below! -If you want to see a different slice, update the slider. - -More details on the model can be found [here!](https://huggingface.co/katielink/spleen_ct_segmentation_v0.1.0) - -## ⚠️ Disclaimer -This is an example, not to be used for diagnostic purposes. 
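The palette trick used in `ClothSession.predict` above can be shown in isolation; a sketch on a tiny hypothetical class map. `putpalette` maps class index k to a color, so a palette that is white only at index 1 isolates class 1 as a binary mask:

```python
import numpy as np
from PIL import Image

pred = np.array([[0, 1], [2, 3]], dtype="uint8")  # hypothetical class map
mask = Image.fromarray(pred, mode="L")
mask.putpalette([0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0])  # index 1 -> white
binary = mask.convert("RGB").convert("L")
print(np.array(binary))  # 255 where pred == 1, 0 elsewhere
```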
- -""" - -# Set up some examples from the test set for better user experience -examples = [ - ['examples/spleen_1.nii.gz', 50], - ['examples/spleen_11.nii.gz', 50], -] - -# Load the pretrained model from Hugging Face Hub -model, _, _ = bundle.load( - name = BUNDLE_NAME, - source = 'huggingface_hub', - repo = 'katielink/spleen_ct_segmentation_v0.1.0', - load_ts_module=True, -) - -# Use GPU if available -device = "cuda:0" if torch.cuda.is_available() else "cpu" - -# Load transforms and inferer directly from the bundle -parser = bundle.load_bundle_config(BUNDLE_PATH, 'inference.json') -preproc_transforms = parser.get_parsed_content( - 'preprocessing', - lazy=True, eval_expr=True,instantiate=True -) -inferer = parser.get_parsed_content( - 'inferer', - lazy=True, eval_expr=True, instantiate=True -) - -# Define the prediction function -def predict(input_file, z_axis, model=model, device=device): - data = {'image': [input_file.name]} - data = preproc_transforms(data) - - model.to(device) - model.eval() - with torch.no_grad(): - inputs = data['image'].to(device)[None,...] - data['pred'] = inferer(inputs=inputs, network=model) - - input_image = data['image'].numpy() - pred_image = torch.argmax(data['pred'], dim=1).cpu().detach().numpy() - - return input_image[0, :, :, z_axis], pred_image[0, :, :, z_axis]*255 - -# Set up the demo interface -iface = gr.Interface( - fn=predict, - inputs=[ - gr.File(label='input file'), - gr.Slider(0, 200, label='slice', value=50) - ], - outputs=['image', 'image'], - title='Segment the Spleen using MONAI! 🩸', - description=description, - examples=examples, -) - -# Launch the demo -iface.launch() diff --git a/spaces/kcagle/AutoGPT/autogpt/commands/web_playwright.py b/spaces/kcagle/AutoGPT/autogpt/commands/web_playwright.py deleted file mode 100644 index 4e388ded203cefb5e24f9116f7fe5b8a94893413..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/autogpt/commands/web_playwright.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Web scraping commands using Playwright""" -from __future__ import annotations - -try: - from playwright.sync_api import sync_playwright -except ImportError: - print( - "Playwright not installed. Please install it with 'pip install playwright' to use." 
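-        # note: only a warning is printed here, so sync_playwright stays undefined and
-        # the scraping functions below raise NameError until Playwright is installed
-        # (its browser binaries are fetched separately, via `playwright install`)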
- ) -from bs4 import BeautifulSoup - -from autogpt.processing.html import extract_hyperlinks, format_hyperlinks - - -def scrape_text(url: str) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - - try: - page.goto(url) - html_content = page.content() - soup = BeautifulSoup(html_content, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - - except Exception as e: - text = f"Error: {str(e)}" - - finally: - browser.close() - - return text - - -def scrape_links(url: str) -> str | list[str]: - """Scrape links from a webpage - - Args: - url (str): The URL to scrape links from - - Returns: - Union[str, List[str]]: The scraped links - """ - with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - - try: - page.goto(url) - html_content = page.content() - soup = BeautifulSoup(html_content, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - hyperlinks = extract_hyperlinks(soup, url) - formatted_links = format_hyperlinks(hyperlinks) - - except Exception as e: - formatted_links = f"Error: {str(e)}" - - finally: - browser.close() - - return formatted_links diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/util/__init__.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/util/__init__.py deleted file mode 100644 index 04eecb58b62f8c9d11d17606c6241d278a48b9b9..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/util/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -"""This package includes a miscellaneous collection of useful helper functions.""" -from src.face3d.util import * - diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/audio2pose_models/discriminator.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/audio2pose_models/discriminator.py deleted file mode 100644 index 339c38e4812ff38a810f0f3a1c01812f6d5d78db..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/audio2pose_models/discriminator.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -class ConvNormRelu(nn.Module): - def __init__(self, conv_type='1d', in_channels=3, out_channels=64, downsample=False, - kernel_size=None, stride=None, padding=None, norm='BN', leaky=False): - super().__init__() - if kernel_size is None: - if downsample: - kernel_size, stride, padding = 4, 2, 1 - else: - kernel_size, stride, padding = 3, 1, 1 - - if conv_type == '2d': - self.conv = nn.Conv2d( - in_channels, - out_channels, - kernel_size, - stride, - padding, - bias=False, - ) - if norm == 'BN': - self.norm = nn.BatchNorm2d(out_channels) - elif norm == 'IN': - self.norm = nn.InstanceNorm2d(out_channels) - else: - raise NotImplementedError - elif conv_type == '1d': - self.conv = nn.Conv1d( - in_channels, - out_channels, - kernel_size, - stride, - padding, - bias=False, - ) - if norm == 'BN': - self.norm = nn.BatchNorm1d(out_channels) - elif norm == 'IN': - self.norm = nn.InstanceNorm1d(out_channels) - else: - raise NotImplementedError - nn.init.kaiming_normal_(self.conv.weight) - - self.act = nn.LeakyReLU(negative_slope=0.2, 
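-                                # 0.2 is a commonly used discriminator slope; note
-                                # that only the plain-ReLU branch runs in place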
inplace=False) if leaky else nn.ReLU(inplace=True) - - def forward(self, x): - x = self.conv(x) - if isinstance(self.norm, nn.InstanceNorm1d): - x = self.norm(x.permute((0, 2, 1))).permute((0, 2, 1)) # normalize on [C] - else: - x = self.norm(x) - x = self.act(x) - return x - - -class PoseSequenceDiscriminator(nn.Module): - def __init__(self, cfg): - super().__init__() - self.cfg = cfg - leaky = self.cfg.MODEL.DISCRIMINATOR.LEAKY_RELU - - self.seq = nn.Sequential( - ConvNormRelu('1d', cfg.MODEL.DISCRIMINATOR.INPUT_CHANNELS, 256, downsample=True, leaky=leaky), # B, 256, 64 - ConvNormRelu('1d', 256, 512, downsample=True, leaky=leaky), # B, 512, 32 - ConvNormRelu('1d', 512, 1024, kernel_size=3, stride=1, padding=1, leaky=leaky), # B, 1024, 16 - nn.Conv1d(1024, 1, kernel_size=3, stride=1, padding=1, bias=True) # B, 1, 16 - ) - - def forward(self, x): - x = x.reshape(x.size(0), x.size(1), -1).transpose(1, 2) - x = self.seq(x) - x = x.squeeze(1) - return x \ No newline at end of file diff --git a/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r50.py b/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r50.py deleted file mode 100644 index 08ba55dbbea6df0afffddbb3d1ed173efad99604..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r50.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r50" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/ms1m-retinaface-t1" -config.num_classes = 93431 -config.num_image = 5179510 -config.num_epoch = 25 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/kevinwang676/VoiceChangers/src/face3d/models/arcface_torch/configs/ms1mv3_r50.py b/spaces/kevinwang676/VoiceChangers/src/face3d/models/arcface_torch/configs/ms1mv3_r50.py deleted file mode 100644 index 08ba55dbbea6df0afffddbb3d1ed173efad99604..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChangers/src/face3d/models/arcface_torch/configs/ms1mv3_r50.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r50" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/ms1m-retinaface-t1" -config.num_classes = 93431 -config.num_image = 5179510 -config.num_epoch = 25 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/kira4424/VITS-fast-fine-tuning/voice_upload.py b/spaces/kira4424/VITS-fast-fine-tuning/voice_upload.py deleted file mode 100644 index 5c825a933a7970e17e57c381b59a5fc4e06ea569..0000000000000000000000000000000000000000 --- a/spaces/kira4424/VITS-fast-fine-tuning/voice_upload.py +++ /dev/null @@ -1,28 +0,0 @@ -from google.colab import files 
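-# Usage sketch (illustrative; google.colab.files only works inside a Colab
-# runtime, and the target folders are expected to exist already):
-#   !python voice_upload.py --type zip    # archive -> ./custom_character_voice/custom_character_voice.zip
-#   !python voice_upload.py --type audio  # files   -> ./raw_audio/
-#   !python voice_upload.py --type video  # files   -> ./video_data/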
-import shutil
-import os
-import argparse
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--type", type=str, required=True, help="type of file to upload")
-    args = parser.parse_args()
-    file_type = args.type
-
-    basepath = os.getcwd()
-    uploaded = files.upload()  # upload files via the Colab file picker
-    assert file_type in ['zip', 'audio', 'video']
-    if file_type == "zip":
-        upload_path = "./custom_character_voice/"
-        for filename in uploaded.keys():
-            # move the uploaded archive to its target location
-            shutil.move(os.path.join(basepath, filename), os.path.join(upload_path, "custom_character_voice.zip"))
-    elif file_type == "audio":
-        upload_path = "./raw_audio/"
-        for filename in uploaded.keys():
-            # move the uploaded file to its target location
-            shutil.move(os.path.join(basepath, filename), os.path.join(upload_path, filename))
-    elif file_type == "video":
-        upload_path = "./video_data/"
-        for filename in uploaded.keys():
-            # move the uploaded file to its target location
-            shutil.move(os.path.join(basepath, filename), os.path.join(upload_path, filename))
\ No newline at end of file
diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/exp/upernet_global_small/run.sh b/spaces/kirch/Text2Video-Zero/annotator/uniformer/exp/upernet_global_small/run.sh
deleted file mode 100644
index 9fb22edfa7a32624ea08a63fe7d720c40db3b696..0000000000000000000000000000000000000000
--- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/exp/upernet_global_small/run.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env bash
-
-work_path=$(dirname $0)
-PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
-python -m torch.distributed.launch --nproc_per_node=8 \
-    tools/train.py ${work_path}/config.py \
-    --launcher pytorch \
-    --options model.backbone.pretrained_path='your_model_path/uniformer_small_in1k.pth' \
-    --work-dir ${work_path}/ckpt \
-    2>&1 | tee -a ${work_path}/log.txt
diff --git a/spaces/kouenYoung/anime-tts/config.py b/spaces/kouenYoung/anime-tts/config.py
deleted file mode 100644
index bd4dc0beeeecd9c3c6508a2d8b8bc26b02550db5..0000000000000000000000000000000000000000
--- a/spaces/kouenYoung/anime-tts/config.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import os
-
-
-def get_pth_file(dir_path):
-    file_list = []
-    for root, dirs, files in os.walk(dir_path):
-        for file in files:
-            if file.endswith("pth"):
-                file_list.append(os.path.join(root, file))
-
-    return file_list
-
-
-file_list = get_pth_file("./")
-# pick the checkpoint with the highest epoch count; compare numerically so that
-# e.g. "100" beats "99" (a plain max() over strings would compare lexically)
-epoch = max((path.split("/")[-1].split("_")[0] for path in file_list), key=int)
-pth_path = f"{epoch}_epochs.pth"
-config_json = "config.json"
diff --git a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/data/datasets.py b/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/data/datasets.py
deleted file mode 100644
index c4f503dafffb970d8dbaca33934da417036d1e55..0000000000000000000000000000000000000000
--- a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/data/datasets.py
+++ /dev/null
@@ -1,304 +0,0 @@
-import glob
-import logging
-import os
-import random
-
-import albumentations as A
-import cv2
-import numpy as np
-import torch
-import torch.nn.functional as F
-import webdataset
-from omegaconf import open_dict, OmegaConf
-from skimage.feature import canny
-from skimage.transform import rescale, resize
-from torch.utils.data import Dataset, IterableDataset, DataLoader, DistributedSampler, ConcatDataset
-
-from saicinpainting.evaluation.data import InpaintingDataset as InpaintingEvaluationDataset, \
-    OurInpaintingDataset as OurInpaintingEvaluationDataset, ceil_modulo, InpaintingEvalOnlineDataset
-from saicinpainting.training.data.aug
import IAAAffine2, IAAPerspective2 -from saicinpainting.training.data.masks import get_mask_generator - -LOGGER = logging.getLogger(__name__) - - -class InpaintingTrainDataset(Dataset): - def __init__(self, indir, mask_generator, transform): - self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True)) - self.mask_generator = mask_generator - self.transform = transform - self.iter_i = 0 - - def __len__(self): - return len(self.in_files) - - def __getitem__(self, item): - path = self.in_files[item] - img = cv2.imread(path) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = self.transform(image=img)['image'] - img = np.transpose(img, (2, 0, 1)) - # TODO: maybe generate mask before augmentations? slower, but better for segmentation-based masks - mask = self.mask_generator(img, iter_i=self.iter_i) - self.iter_i += 1 - return dict(image=img, - mask=mask) - - -class InpaintingTrainWebDataset(IterableDataset): - def __init__(self, indir, mask_generator, transform, shuffle_buffer=200): - self.impl = webdataset.Dataset(indir).shuffle(shuffle_buffer).decode('rgb').to_tuple('jpg') - self.mask_generator = mask_generator - self.transform = transform - - def __iter__(self): - for iter_i, (img,) in enumerate(self.impl): - img = np.clip(img * 255, 0, 255).astype('uint8') - img = self.transform(image=img)['image'] - img = np.transpose(img, (2, 0, 1)) - mask = self.mask_generator(img, iter_i=iter_i) - yield dict(image=img, - mask=mask) - - -class ImgSegmentationDataset(Dataset): - def __init__(self, indir, mask_generator, transform, out_size, segm_indir, semantic_seg_n_classes): - self.indir = indir - self.segm_indir = segm_indir - self.mask_generator = mask_generator - self.transform = transform - self.out_size = out_size - self.semantic_seg_n_classes = semantic_seg_n_classes - self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True)) - - def __len__(self): - return len(self.in_files) - - def __getitem__(self, item): - path = self.in_files[item] - img = cv2.imread(path) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = cv2.resize(img, (self.out_size, self.out_size)) - img = self.transform(image=img)['image'] - img = np.transpose(img, (2, 0, 1)) - mask = self.mask_generator(img) - segm, segm_classes= self.load_semantic_segm(path) - result = dict(image=img, - mask=mask, - segm=segm, - segm_classes=segm_classes) - return result - - def load_semantic_segm(self, img_path): - segm_path = img_path.replace(self.indir, self.segm_indir).replace(".jpg", ".png") - mask = cv2.imread(segm_path, cv2.IMREAD_GRAYSCALE) - mask = cv2.resize(mask, (self.out_size, self.out_size)) - tensor = torch.from_numpy(np.clip(mask.astype(int)-1, 0, None)) - ohe = F.one_hot(tensor.long(), num_classes=self.semantic_seg_n_classes) # w x h x n_classes - return ohe.permute(2, 0, 1).float(), tensor.unsqueeze(0) - - -def get_transforms(transform_variant, out_size): - if transform_variant == 'default': - transform = A.Compose([ - A.RandomScale(scale_limit=0.2), # +/- 20% - A.PadIfNeeded(min_height=out_size, min_width=out_size), - A.RandomCrop(height=out_size, width=out_size), - A.HorizontalFlip(), - A.CLAHE(), - A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), - A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), - A.ToFloat() - ]) - elif transform_variant == 'distortions': - transform = A.Compose([ - IAAPerspective2(scale=(0.0, 0.06)), - IAAAffine2(scale=(0.7, 1.3), - rotate=(-40, 40), - shear=(-0.1, 0.1)), - 
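-            # the two IAA* ops above add geometric jitter (perspective warp plus
-            # affine scale/rotate/shear); the remaining ops are crop/color augmentations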
A.PadIfNeeded(min_height=out_size, min_width=out_size), - A.OpticalDistortion(), - A.RandomCrop(height=out_size, width=out_size), - A.HorizontalFlip(), - A.CLAHE(), - A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), - A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), - A.ToFloat() - ]) - elif transform_variant == 'distortions_scale05_1': - transform = A.Compose([ - IAAPerspective2(scale=(0.0, 0.06)), - IAAAffine2(scale=(0.5, 1.0), - rotate=(-40, 40), - shear=(-0.1, 0.1), - p=1), - A.PadIfNeeded(min_height=out_size, min_width=out_size), - A.OpticalDistortion(), - A.RandomCrop(height=out_size, width=out_size), - A.HorizontalFlip(), - A.CLAHE(), - A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), - A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), - A.ToFloat() - ]) - elif transform_variant == 'distortions_scale03_12': - transform = A.Compose([ - IAAPerspective2(scale=(0.0, 0.06)), - IAAAffine2(scale=(0.3, 1.2), - rotate=(-40, 40), - shear=(-0.1, 0.1), - p=1), - A.PadIfNeeded(min_height=out_size, min_width=out_size), - A.OpticalDistortion(), - A.RandomCrop(height=out_size, width=out_size), - A.HorizontalFlip(), - A.CLAHE(), - A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), - A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), - A.ToFloat() - ]) - elif transform_variant == 'distortions_scale03_07': - transform = A.Compose([ - IAAPerspective2(scale=(0.0, 0.06)), - IAAAffine2(scale=(0.3, 0.7), # scale 512 to 256 in average - rotate=(-40, 40), - shear=(-0.1, 0.1), - p=1), - A.PadIfNeeded(min_height=out_size, min_width=out_size), - A.OpticalDistortion(), - A.RandomCrop(height=out_size, width=out_size), - A.HorizontalFlip(), - A.CLAHE(), - A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), - A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), - A.ToFloat() - ]) - elif transform_variant == 'distortions_light': - transform = A.Compose([ - IAAPerspective2(scale=(0.0, 0.02)), - IAAAffine2(scale=(0.8, 1.8), - rotate=(-20, 20), - shear=(-0.03, 0.03)), - A.PadIfNeeded(min_height=out_size, min_width=out_size), - A.RandomCrop(height=out_size, width=out_size), - A.HorizontalFlip(), - A.CLAHE(), - A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), - A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), - A.ToFloat() - ]) - elif transform_variant == 'non_space_transform': - transform = A.Compose([ - A.CLAHE(), - A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), - A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), - A.ToFloat() - ]) - elif transform_variant == 'no_augs': - transform = A.Compose([ - A.ToFloat() - ]) - else: - raise ValueError(f'Unexpected transform_variant {transform_variant}') - return transform - - -def make_default_train_dataloader(indir, kind='default', out_size=512, mask_gen_kwargs=None, transform_variant='default', - mask_generator_kind="mixed", dataloader_kwargs=None, ddp_kwargs=None, **kwargs): - LOGGER.info(f'Make train dataloader {kind} from {indir}. 
Using mask generator={mask_generator_kind}') - - mask_generator = get_mask_generator(kind=mask_generator_kind, kwargs=mask_gen_kwargs) - transform = get_transforms(transform_variant, out_size) - - if kind == 'default': - dataset = InpaintingTrainDataset(indir=indir, - mask_generator=mask_generator, - transform=transform, - **kwargs) - elif kind == 'default_web': - dataset = InpaintingTrainWebDataset(indir=indir, - mask_generator=mask_generator, - transform=transform, - **kwargs) - elif kind == 'img_with_segm': - dataset = ImgSegmentationDataset(indir=indir, - mask_generator=mask_generator, - transform=transform, - out_size=out_size, - **kwargs) - else: - raise ValueError(f'Unknown train dataset kind {kind}') - - if dataloader_kwargs is None: - dataloader_kwargs = {} - - is_dataset_only_iterable = kind in ('default_web',) - - if ddp_kwargs is not None and not is_dataset_only_iterable: - dataloader_kwargs['shuffle'] = False - dataloader_kwargs['sampler'] = DistributedSampler(dataset, **ddp_kwargs) - - if is_dataset_only_iterable and 'shuffle' in dataloader_kwargs: - with open_dict(dataloader_kwargs): - del dataloader_kwargs['shuffle'] - - dataloader = DataLoader(dataset, **dataloader_kwargs) - return dataloader - - -def make_default_val_dataset(indir, kind='default', out_size=512, transform_variant='default', **kwargs): - if OmegaConf.is_list(indir) or isinstance(indir, (tuple, list)): - return ConcatDataset([ - make_default_val_dataset(idir, kind=kind, out_size=out_size, transform_variant=transform_variant, **kwargs) for idir in indir - ]) - - LOGGER.info(f'Make val dataloader {kind} from {indir}') - mask_generator = get_mask_generator(kind=kwargs.get("mask_generator_kind"), kwargs=kwargs.get("mask_gen_kwargs")) - - if transform_variant is not None: - transform = get_transforms(transform_variant, out_size) - - if kind == 'default': - dataset = InpaintingEvaluationDataset(indir, **kwargs) - elif kind == 'our_eval': - dataset = OurInpaintingEvaluationDataset(indir, **kwargs) - elif kind == 'img_with_segm': - dataset = ImgSegmentationDataset(indir=indir, - mask_generator=mask_generator, - transform=transform, - out_size=out_size, - **kwargs) - elif kind == 'online': - dataset = InpaintingEvalOnlineDataset(indir=indir, - mask_generator=mask_generator, - transform=transform, - out_size=out_size, - **kwargs) - else: - raise ValueError(f'Unknown val dataset kind {kind}') - - return dataset - - -def make_default_val_dataloader(*args, dataloader_kwargs=None, **kwargs): - dataset = make_default_val_dataset(*args, **kwargs) - - if dataloader_kwargs is None: - dataloader_kwargs = {} - dataloader = DataLoader(dataset, **dataloader_kwargs) - return dataloader - - -def make_constant_area_crop_params(img_height, img_width, min_size=128, max_size=512, area=256*256, round_to_mod=16): - min_size = min(img_height, img_width, min_size) - max_size = min(img_height, img_width, max_size) - if random.random() < 0.5: - out_height = min(max_size, ceil_modulo(random.randint(min_size, max_size), round_to_mod)) - out_width = min(max_size, ceil_modulo(area // out_height, round_to_mod)) - else: - out_width = min(max_size, ceil_modulo(random.randint(min_size, max_size), round_to_mod)) - out_height = min(max_size, ceil_modulo(area // out_width, round_to_mod)) - - start_y = random.randint(0, img_height - out_height) - start_x = random.randint(0, img_width - out_width) - return (start_y, start_x, out_height, out_width) diff --git a/spaces/kumahiyo/line-bot-stable-diffusion/Dockerfile 
b/spaces/kumahiyo/line-bot-stable-diffusion/Dockerfile deleted file mode 100644 index 7a568233b7ee31ffffb52339e5a6dd0aba5b1ae8..0000000000000000000000000000000000000000 --- a/spaces/kumahiyo/line-bot-stable-diffusion/Dockerfile +++ /dev/null @@ -1,70 +0,0 @@ -FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu20.04 - -ENV DEBIAN_FRONTEND=noninteractive -ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python - -RUN apt update && \ - apt install -y bash \ - build-essential \ - git \ - git-lfs \ - curl \ - ca-certificates \ - libsndfile1-dev \ - python3.8 \ - python3-pip \ - python3.8-venv && \ - rm -rf /var/lib/apt/lists - -# make sure to use venv -RUN python3 -m venv /opt/venv -ENV PATH="/opt/venv/bin:$PATH" - -# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) -RUN python3 -m pip install --no-cache-dir --upgrade pip && \ - python3 -m pip install --no-cache-dir \ - torch \ - torchvision \ - torchaudio \ - --extra-index-url https://download.pytorch.org/whl/cu117 && \ - python3 -m pip install --no-cache-dir \ - accelerate \ - datasets \ - hf-doc-builder \ - huggingface-hub \ - Jinja2 \ - librosa \ - numpy \ - scipy \ - tensorboard \ - transformers - -# Set the working directory to /code -WORKDIR /code - -# Copy the current directory contents into the container at /code -COPY ./requirements.txt /code/requirements.txt - -# Install requirements.txt -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -COPY . . - -RUN mkdir tmpdir -RUN chmod 777 tmpdir - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user -# Switch to the "user" user -USER user -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . $HOME/app - -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/attr/_next_gen.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/attr/_next_gen.py deleted file mode 100644 index 8f7c0b9a46b7a0ee008f94b8054baf5807df043a..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/attr/_next_gen.py +++ /dev/null @@ -1,232 +0,0 @@ -# SPDX-License-Identifier: MIT - -""" -These are keyword-only APIs that call `attr.s` and `attr.ib` with different -default values. -""" - - -from functools import partial - -from . import setters -from ._funcs import asdict as _asdict -from ._funcs import astuple as _astuple -from ._make import ( - NOTHING, - _frozen_setattrs, - _ng_default_on_setattr, - attrib, - attrs, -) -from .exceptions import UnannotatedAttributeError - - -def define( - maybe_cls=None, - *, - these=None, - repr=None, - unsafe_hash=None, - hash=None, - init=None, - slots=True, - frozen=False, - weakref_slot=True, - str=False, - auto_attribs=None, - kw_only=False, - cache_hash=False, - auto_exc=True, - eq=None, - order=False, - auto_detect=True, - getstate_setstate=None, - on_setattr=None, - field_transformer=None, - match_args=True, -): - r""" - Define an *attrs* class. - - Differences to the classic `attr.s` that it uses underneath: - - - Automatically detect whether or not *auto_attribs* should be `True` (c.f. - *auto_attribs* parameter). 
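-
-      For instance, auto-detection treats a class whose attributes are all
-      annotated as *auto_attribs=True*::
-
-          @define
-          class Point:
-              x: int
-              y: int = 0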
-
-    - If *frozen* is `False`, run converters and validators when setting an
-      attribute by default.
-    - *slots=True*
-
-      .. caution::
-
-         Usually this has only upsides and few visible effects in everyday
-         programming. But it *can* lead to some surprising behaviors, so please
-         make sure to read :term:`slotted classes`.
-    - *auto_exc=True*
-    - *auto_detect=True*
-    - *order=False*
-    - Some options that were only relevant on Python 2 or were kept around for
-      backwards-compatibility have been removed.
-
-    Please note that these are all defaults and you can change them as you
-    wish.
-
-    :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves
-       exactly like `attr.s`. If left `None`, `attr.s` will try to guess:
-
-       1. If any attributes are annotated and no unannotated `attrs.fields`\ s
-          are found, it assumes *auto_attribs=True*.
-       2. Otherwise it assumes *auto_attribs=False* and tries to collect
-          `attrs.fields`\ s.
-
-    For now, please refer to `attr.s` for the rest of the parameters.
-
-    .. versionadded:: 20.1.0
-    .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``.
-    .. versionadded:: 22.2.0
-       *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
-    """
-
-    def do_it(cls, auto_attribs):
-        return attrs(
-            maybe_cls=cls,
-            these=these,
-            repr=repr,
-            hash=hash,
-            unsafe_hash=unsafe_hash,
-            init=init,
-            slots=slots,
-            frozen=frozen,
-            weakref_slot=weakref_slot,
-            str=str,
-            auto_attribs=auto_attribs,
-            kw_only=kw_only,
-            cache_hash=cache_hash,
-            auto_exc=auto_exc,
-            eq=eq,
-            order=order,
-            auto_detect=auto_detect,
-            collect_by_mro=True,
-            getstate_setstate=getstate_setstate,
-            on_setattr=on_setattr,
-            field_transformer=field_transformer,
-            match_args=match_args,
-        )
-
-    def wrap(cls):
-        """
-        Making this a wrapper ensures this code runs during class creation.
-
-        We also ensure that frozen-ness of classes is inherited.
-        """
-        nonlocal frozen, on_setattr
-
-        had_on_setattr = on_setattr not in (None, setters.NO_OP)
-
-        # By default, mutable classes convert & validate on setattr.
-        if frozen is False and on_setattr is None:
-            on_setattr = _ng_default_on_setattr
-
-        # However, if we subclass a frozen class, we inherit the immutability
-        # and disable on_setattr.
-        for base_cls in cls.__bases__:
-            if base_cls.__setattr__ is _frozen_setattrs:
-                if had_on_setattr:
-                    raise ValueError(
-                        "Frozen classes can't use on_setattr "
-                        "(frozen-ness was inherited)."
-                    )
-
-                on_setattr = setters.NO_OP
-                break
-
-        if auto_attribs is not None:
-            return do_it(cls, auto_attribs)
-
-        try:
-            return do_it(cls, True)
-        except UnannotatedAttributeError:
-            return do_it(cls, False)
-
-    # maybe_cls's type depends on the usage of the decorator. It's a class
-    # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
-    if maybe_cls is None:
-        return wrap
-    else:
-        return wrap(maybe_cls)
-
-
-mutable = define
-frozen = partial(define, frozen=True, on_setattr=None)
-
-
-def field(
-    *,
-    default=NOTHING,
-    validator=None,
-    repr=True,
-    hash=None,
-    init=True,
-    metadata=None,
-    type=None,
-    converter=None,
-    factory=None,
-    kw_only=False,
-    eq=None,
-    order=None,
-    on_setattr=None,
-    alias=None,
-):
-    """
-    Identical to `attr.ib`, except keyword-only and with some arguments
-    removed.
-
-    .. versionadded:: 23.1.0
-       The *type* parameter has been re-added; mostly for
-       {func}`attrs.make_class`. Please note that type checkers ignore this
-       metadata.
-    ..
versionadded:: 20.1.0 - """ - return attrib( - default=default, - validator=validator, - repr=repr, - hash=hash, - init=init, - metadata=metadata, - type=type, - converter=converter, - factory=factory, - kw_only=kw_only, - eq=eq, - order=order, - on_setattr=on_setattr, - alias=alias, - ) - - -def asdict(inst, *, recurse=True, filter=None, value_serializer=None): - """ - Same as `attr.asdict`, except that collections types are always retained - and dict is always used as *dict_factory*. - - .. versionadded:: 21.3.0 - """ - return _asdict( - inst=inst, - recurse=recurse, - filter=filter, - value_serializer=value_serializer, - retain_collection_types=True, - ) - - -def astuple(inst, *, recurse=True, filter=None): - """ - Same as `attr.astuple`, except that collections types are always retained - and `tuple` is always used as the *tuple_factory*. - - .. versionadded:: 21.3.0 - """ - return _astuple( - inst=inst, recurse=recurse, filter=filter, retain_collection_types=True - ) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_S_U_B_.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_S_U_B_.py deleted file mode 100644 index bb8375a5f83029d2b05388d5c882edd9c4aba95c..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_S_U_B_.py +++ /dev/null @@ -1,5 +0,0 @@ -from .otBase import BaseTTXConverter - - -class table_G_S_U_B_(BaseTTXConverter): - pass diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-741dcd26.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-741dcd26.js deleted file mode 100644 index bc8b46f2ee98ab28732b30676a33e389d396b54a..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-741dcd26.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as k,i as L,s as j,G as w,C as o,M as c,g,F as T,q as d,r as C,e as h,m as v,p as b,t as H,n as M,x as S,$ as q,H as B,h as z,j as D,y as E}from"./index-7c0e54a6.js";import{B as F}from"./Button-661a0701.js";function G(t){let e,n;return{c(){e=w("div"),o(e,"class",n="prose "+t[1].join(" ")+" svelte-1ybaih5"),o(e,"id",t[0]),c(e,"min",t[4]),c(e,"hide",!t[3])},m(s,i){g(s,e,i),e.innerHTML=t[2]},p(s,[i]){i&4&&(e.innerHTML=s[2]),i&2&&n!==(n="prose "+s[1].join(" ")+" svelte-1ybaih5")&&o(e,"class",n),i&1&&o(e,"id",s[0]),i&18&&c(e,"min",s[4]),i&10&&c(e,"hide",!s[3])},i:T,o:T,d(s){s&&d(e)}}}function A(t,e,n){let{elem_id:s=""}=e,{elem_classes:i=[]}=e,{value:m}=e,{visible:u=!0}=e,{min_height:f=!1}=e;const l=C();return t.$$set=a=>{"elem_id"in a&&n(0,s=a.elem_id),"elem_classes"in a&&n(1,i=a.elem_classes),"value"in a&&n(2,m=a.value),"visible"in a&&n(3,u=a.visible),"min_height"in a&&n(4,f=a.min_height)},t.$$.update=()=>{t.$$.dirty&4&&l("change")},[s,i,m,u,f]}class I extends k{constructor(e){super(),L(this,e,A,G,j,{elem_id:0,elem_classes:1,value:2,visible:3,min_height:4})}}function J(t){let e,n,s,i,m;const u=[t[4],{variant:"center"}];let f={};for(let l=0;l{"label"in _&&n(5,s=_.label),"elem_id"in _&&n(0,i=_.elem_id),"elem_classes"in _&&n(1,m=_.elem_classes),"visible"in _&&n(2,u=_.visible),"value"in _&&n(3,f=_.value),"loading_status"in _&&n(4,l=_.loading_status)},t.$$.update=()=>{t.$$.dirty&32&&a("change")},[i,m,u,f,l,s,r]}class O extends 
k{constructor(e){super(),L(this,e,N,K,j,{label:5,elem_id:0,elem_classes:1,visible:2,value:3,loading_status:4})}}const R=O,U=["static"],V=t=>({type:{payload:"string"},description:{payload:"HTML output"}});export{R as Component,V as document,U as modes}; -//# sourceMappingURL=index-741dcd26.js.map diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-50e85c2b.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-50e85c2b.js deleted file mode 100644 index 1c50d6b10798caaca7322615177df22e40e9b70d..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-50e85c2b.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as P,i as Q,s as W,G as S,H as J,f as de,C as w,g as N,p as q,l as ee,t as D,o as le,q as R,r as he,D as F,J as L,a2 as me,aa as Ae,ab as se,N as ge,I as te,M as B,E as y,K as ne,b as V,ag as ie,B as Y,F as G,a as x,e as U,m as j,ad as oe,k as $,n as z,a0 as ye,a8 as Be,x as qe,$ as De,h as Ce,j as Se,y as X}from"./index-8c3da1d9.js";/* empty css */import{B as Ee}from"./Button-62634b34.js";import{B as Ne}from"./BlockTitle-338c46c0.js";/* empty css */import"./Info-b95ed9db.js";function ue(t,e,l){const s=t.slice();return s[18]=e[l],s}function fe(t){let e,l,s,u,r,n=t[0],a=[];for(let i=0;i{s&&(l||(l=se(e,ie,{duration:200,y:5},!0)),l.run(1))}),s=!0)},o(i){l||(l=se(e,ie,{duration:200,y:5},!1)),l.run(0),s=!1},d(i){i&&R(e),ge(a,i),t[17](null),i&&l&&l.end(),u=!1,r()}}}function ae(t){let e,l,s,u=t[18]+"",r,n,a,i;return{c(){e=S("li"),l=S("span"),l.textContent="✓",s=J(),r=te(u),n=J(),w(l,"class","inner-item svelte-1udn3b5"),B(l,"hide",!t[9].includes(t[18])),w(e,"class","item svelte-1udn3b5"),w(e,"role","button"),w(e,"data-value",a=t[18]),w(e,"aria-label",i=t[18]),B(e,"selected",t[9].includes(t[18])),B(e,"active",t[2]===t[18]),B(e,"bg-gray-100",t[2]===t[18]),B(e,"dark:bg-gray-600",t[2]===t[18])},m(f,c){N(f,e,c),y(e,l),y(e,s),y(e,r),y(e,n)},p(f,c){c&513&&B(l,"hide",!f[9].includes(f[18])),c&1&&u!==(u=f[18]+"")&&ne(r,u),c&1&&a!==(a=f[18])&&w(e,"data-value",a),c&1&&i!==(i=f[18])&&w(e,"aria-label",i),c&513&&B(e,"selected",f[9].includes(f[18])),c&5&&B(e,"active",f[2]===f[18]),c&5&&B(e,"bg-gray-100",f[2]===f[18]),c&5&&B(e,"dark:bg-gray-600",f[2]===f[18])},d(f){f&&R(e)}}}function Re(t){let e,l,s,u,r=t[1]&&!t[3]&&fe(t);return{c(){e=S("div"),l=J(),r&&r.c(),s=de(),w(e,"class","reference")},m(n,a){N(n,e,a),t[15](e),N(n,l,a),r&&r.m(n,a),N(n,s,a),u=!0},p(n,[a]){n[1]&&!n[3]?r?(r.p(n,a),a&10&&q(r,1)):(r=fe(n),r.c(),q(r,1),r.m(s.parentNode,s)):r&&(ee(),D(r,1,1,()=>{r=null}),le())},i(n){u||(q(r),u=!0)},o(n){D(r),u=!1},d(n){n&&R(e),t[15](null),n&&R(l),r&&r.d(n),n&&R(s)}}}function Je(t,e,l){let s,{value:u=void 0}=e,{filtered:r}=e,{showOptions:n=!1}=e,{activeOption:a}=e,{disabled:i=!1}=e,f,c,g,_,v,k,b,m;const p=he();function O(h){V[h?"unshift":"push"](()=>{_=h,l(4,_)})}const E=h=>p("change",h);function T(h){V[h?"unshift":"push"](()=>{v=h,l(5,v)})}return t.$$set=h=>{"value"in h&&l(11,u=h.value),"filtered"in h&&l(0,r=h.filtered),"showOptions"in h&&l(1,n=h.showOptions),"activeOption"in h&&l(2,a=h.activeOption),"disabled"in h&&l(3,i=h.disabled)},t.$$.update=()=>{if(t.$$.dirty&30770){if(n&&_){if(v&&typeof u=="string"){let 
h=document.querySelector(`li[data-value="${u}"]`);h&&v.scrollTo(0,h.offsetTop)}l(12,f=_.getBoundingClientRect().top),l(13,c=window.innerHeight-_.getBoundingClientRect().bottom),l(14,g=_.parentElement?.getBoundingClientRect().height||0)}c>f?(l(6,k=`${g}px`),l(8,m=c),l(7,b=null)):(l(7,b=`${g}px`),l(8,m=f-g),l(6,k=null))}t.$$.dirty&2048&&l(9,s=Array.isArray(u)?u:[u])},[r,n,a,i,_,v,k,b,m,s,p,u,f,c,g,O,E,T]}class Me extends P{constructor(e){super(),Q(this,e,Je,Re,W,{value:11,filtered:0,showOptions:1,activeOption:2,disabled:3})}}function Te(t){let e,l;return{c(){e=Y("svg"),l=Y("path"),w(l,"d","M5 8l4 4 4-4z"),w(e,"class","dropdown-arrow svelte-p5edak"),w(e,"xmlns","http://www.w3.org/2000/svg"),w(e,"width","18"),w(e,"height","18"),w(e,"viewBox","0 0 18 18")},m(s,u){N(s,e,u),y(e,l)},p:G,i:G,o:G,d(s){s&&R(e)}}}class Ie extends P{constructor(e){super(),Q(this,e,null,Te,W,{})}}function Le(t){let e,l;return{c(){e=Y("svg"),l=Y("path"),w(l,"d","M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12z"),w(e,"xmlns","http://www.w3.org/2000/svg"),w(e,"width","16"),w(e,"height","16"),w(e,"viewBox","0 0 24 24")},m(s,u){N(s,e,u),y(e,l)},p:G,i:G,o:G,d(s){s&&R(e)}}}class be extends P{constructor(e){super(),Q(this,e,null,Le,W,{})}}function re(t,e,l){const s=t.slice();return s[30]=e[l],s}function Ue(t){let e;return{c(){e=te(t[1])},m(l,s){N(l,e,s)},p(l,s){s[0]&2&&ne(e,l[1])},d(l){l&&R(e)}}}function _e(t){let e,l,s=t[0],u=[];for(let n=0;nD(u[n],1,1,()=>{u[n]=null});return{c(){for(let n=0;nx(m,"value",M)),m.$on("change",t[14]),{c(){e=S("label"),U(l.$$.fragment),s=J(),u=S("div"),r=S("div"),h&&h.c(),a=J(),i=S("div"),f=S("input"),c=J(),g=S("div"),U(_.$$.fragment),v=J(),U(k.$$.fragment),b=J(),U(m.$$.fragment),w(f,"class","border-none svelte-aqlk7e"),f.disabled=t[4],w(f,"autocomplete","off"),B(f,"subdued",t[0]!==t[7]&&!t[6]),w(g,"class","token-remove remove-all svelte-aqlk7e"),w(g,"title","Clear"),B(g,"hide",!t[3]||!t[0]?.length||t[4]),w(i,"class","secondary-wrap svelte-aqlk7e"),w(r,"class","wrap-inner svelte-aqlk7e"),B(r,"showOptions",t[10]),w(u,"class","wrap svelte-aqlk7e")},m(o,A){N(o,e,A),j(l,e,null),y(e,s),y(e,u),y(u,r),h&&h.m(r,null),y(r,a),y(r,i),y(i,f),oe(f,t[7]),t[22](f),y(i,c),y(i,g),j(_,g,null),y(i,v),j(k,i,null),y(u,b),j(m,u,null),O=!0,E||(T=[L(f,"input",t[21]),L(f,"focus",t[23]),L(f,"keydown",t[15]),L(f,"keyup",t[24]),L(f,"blur",t[25]),L(g,"click",t[13])],E=!0)},p(o,A){const K={};A[0]&32&&(K.show_label=o[5]),A[0]&4&&(K.info=o[2]),A[0]&2|A[1]&4&&(K.$$scope={dirty:A,ctx:o}),l.$set(K),A[0]&9&&(n=o[3]&&Array.isArray(o[0])),n?h?(h.p(o,A),A[0]&9&&q(h,1)):(h=_e(o),h.c(),q(h,1),h.m(r,a)):h&&(ee(),D(h,1,1,()=>{h=null}),le()),(!O||A[0]&16)&&(f.disabled=o[4]),A[0]&128&&f.value!==o[7]&&oe(f,o[7]),(!O||A[0]&193)&&B(f,"subdued",o[0]!==o[7]&&!o[6]),(!O||A[0]&25)&&B(g,"hide",!o[3]||!o[0]?.length||o[4]),(!O||A[0]&1024)&&B(r,"showOptions",o[10]);const I={};A[0]&1024&&(I.showOptions=o[10]),A[0]&512&&(I.filtered=o[9]),A[0]&256&&(I.activeOption=o[8]),A[0]&16&&(I.disabled=o[4]),!p&&A[0]&1&&(p=!0,I.value=o[0],$(()=>p=!1)),m.$set(I)},i(o){O||(q(l.$$.fragment,o),q(h),q(_.$$.fragment,o),q(k.$$.fragment,o),q(m.$$.fragment,o),O=!0)},o(o){D(l.$$.fragment,o),D(h),D(_.$$.fragment,o),D(k.$$.fragment,o),D(m.$$.fragment,o),O=!1},d(o){o&&R(e),z(l),h&&h.d(),t[22](null),z(_),z(k),z(m),E=!1,ye(T)}}}function ze(t,e,l){let s,{label:u}=e,{info:r=void 
0}=e,{value:n}=e,a=Array.isArray(n)?n.slice():n,{value_is_output:i=!1}=e,{multiselect:f=!1}=e,{max_choices:c}=e,{choices:g}=e,{disabled:_=!1}=e,{show_label:v}=e,{allow_custom_value:k=!1}=e;const b=he();let m,p,O=!1,E;function T(){b("change",n),i||b("input")}Be(()=>{l(16,i=!1)});function h(d){l(0,n),(!c||n.lengthC!==d)),b("select",{index:g.indexOf(d),value:d,selected:!1})}function H(d){l(0,n=[]),l(7,m=""),d.preventDefault()}function o(d){const C=d.detail.target.dataset.value;if(k&&l(7,m=C),C!==void 0)if(f)n?.includes(C)?M(C):h(C),l(7,m="");else{l(0,n=C),l(7,m=C),l(10,O=!1),b("select",{index:g.indexOf(C),value:C,selected:!0});return}}function A(d){if(d.key==="Enter"&&p!=null)f?f&&Array.isArray(n)&&(n.includes(p)?M(p):h(p),l(7,m="")):(n!==p&&(l(0,n=p),b("select",{index:g.indexOf(n),value:n,selected:!0})),l(7,m=p),l(10,O=!1));else if(l(10,O=!0),d.key==="ArrowUp"||d.key==="ArrowDown"){p===null&&l(8,p=s[0]);const C=d.key==="ArrowUp"?-1:1,Z=s.indexOf(p)+C;l(8,p=Z<0?s[s.length-1]:Z===s.length?s[0]:s[Z]),d.preventDefault()}else d.key==="Escape"?l(10,O=!1):d.key==="Backspace"?f&&(!m||m==="")&&Array.isArray(n)&&n.length>0&&(M(n[n.length-1]),l(7,m="")):l(10,O=!0)}const K=d=>M(d);function I(){m=this.value,l(7,m),l(0,n)}function we(d){V[d?"unshift":"push"](()=>{E=d,l(11,E)})}const ve=()=>{l(10,O=!O),O?l(7,m=""):E.blur()},ke=()=>{k&&l(0,n=m)},pe=()=>{f?l(7,m=""):k||n!==m&&(typeof n=="string"&&m==""?l(7,m=n):(l(0,n=void 0),l(7,m=""))),l(10,O=!1)};function Oe(d){n=d,l(0,n)}return t.$$set=d=>{"label"in d&&l(1,u=d.label),"info"in d&&l(2,r=d.info),"value"in d&&l(0,n=d.value),"value_is_output"in d&&l(16,i=d.value_is_output),"multiselect"in d&&l(3,f=d.multiselect),"max_choices"in d&&l(17,c=d.max_choices),"choices"in d&&l(18,g=d.choices),"disabled"in d&&l(4,_=d.disabled),"show_label"in d&&l(5,v=d.show_label),"allow_custom_value"in d&&l(6,k=d.allow_custom_value)},t.$$.update=()=>{t.$$.dirty[0]&1&&typeof n=="string"&&l(7,m=n),t.$$.dirty[0]&262272&&l(9,s=g.filter(d=>m?d.toLowerCase().includes(m.toLowerCase()):d)),t.$$.dirty[0]&768&&(!p||!s.includes(p))&&l(8,p=s.length?s[0]:null),t.$$.dirty[0]&524289&&JSON.stringify(n)!=JSON.stringify(a)&&(l(19,a=Array.isArray(n)?n.slice():n),T()),t.$$.dirty[0]&524289&&JSON.stringify(n)!=JSON.stringify(a)&&(b("change",n),l(19,a=Array.isArray(n)?n.slice():n))},[n,u,r,f,_,v,k,m,p,s,O,E,M,H,o,A,i,c,g,a,K,I,we,ve,ke,pe,Oe]}class He extends P{constructor(e){super(),Q(this,e,ze,je,W,{label:1,info:2,value:0,value_is_output:16,multiselect:3,max_choices:17,choices:18,disabled:4,show_label:5,allow_custom_value:6},null,[-1,-1])}}function Ke(t){let e,l,s,u,r,n;const a=[t[12]];let i={};for(let _=0;_x(s,"value",f)),V.push(()=>x(s,"value_is_output",c)),s.$on("change",t[17]),s.$on("input",t[18]),s.$on("select",t[19]),s.$on("blur",t[20]),{c(){U(e.$$.fragment),l=J(),U(s.$$.fragment)},m(_,v){j(e,_,v),N(_,l,v),j(s,_,v),n=!0},p(_,v){const k=v&4096?Ce(a,[Se(_[12])]):{};e.$set(k);const b={};v&512&&(b.choices=_[9]),v&128&&(b.multiselect=_[7]),v&256&&(b.max_choices=_[8]),v&4&&(b.label=_[2]),v&8&&(b.info=_[3]),v&1024&&(b.show_label=_[10]),v&8192&&(b.allow_custom_value=_[13]),v&16384&&(b.disabled=_[14]==="static"),!u&&v&1&&(u=!0,b.value=_[0],$(()=>u=!1)),!r&&v&2&&(r=!0,b.value_is_output=_[1],$(()=>r=!1)),s.$set(b)},i(_){n||(q(e.$$.fragment,_),q(s.$$.fragment,_),n=!0)},o(_){D(e.$$.fragment,_),D(s.$$.fragment,_),n=!1},d(_){z(e,_),_&&R(l),z(s,_)}}}function Fe(t){let e,l;return e=new Ee({props:{visible:t[6],elem_id:t[4],elem_classes:t[5],disable:typeof 
t[11].container=="boolean"&&!t[11].container,$$slots:{default:[Ke]},$$scope:{ctx:t}}}),{c(){U(e.$$.fragment)},m(s,u){j(e,s,u),l=!0},p(s,[u]){const r={};u&64&&(r.visible=s[6]),u&16&&(r.elem_id=s[4]),u&32&&(r.elem_classes=s[5]),u&2048&&(r.disable=typeof s[11].container=="boolean"&&!s[11].container),u&2127759&&(r.$$scope={dirty:u,ctx:s}),e.$set(r)},i(s){l||(q(e.$$.fragment,s),l=!0)},o(s){D(e.$$.fragment,s),l=!1},d(s){z(e,s)}}}function Ge(t,e,l){let{label:s="Dropdown"}=e,{info:u=void 0}=e,{elem_id:r=""}=e,{elem_classes:n=[]}=e,{visible:a=!0}=e,{value:i}=e,{value_is_output:f=!1}=e,{multiselect:c=!1}=e,{max_choices:g}=e,{choices:_}=e,{show_label:v}=e,{style:k={}}=e,{loading_status:b}=e,{allow_custom_value:m=!1}=e,{mode:p}=e;c&&!i?i=[]:i||(i="");function O(o){i=o,l(0,i)}function E(o){f=o,l(1,f)}function T(o){X.call(this,t,o)}function h(o){X.call(this,t,o)}function M(o){X.call(this,t,o)}function H(o){X.call(this,t,o)}return t.$$set=o=>{"label"in o&&l(2,s=o.label),"info"in o&&l(3,u=o.info),"elem_id"in o&&l(4,r=o.elem_id),"elem_classes"in o&&l(5,n=o.elem_classes),"visible"in o&&l(6,a=o.visible),"value"in o&&l(0,i=o.value),"value_is_output"in o&&l(1,f=o.value_is_output),"multiselect"in o&&l(7,c=o.multiselect),"max_choices"in o&&l(8,g=o.max_choices),"choices"in o&&l(9,_=o.choices),"show_label"in o&&l(10,v=o.show_label),"style"in o&&l(11,k=o.style),"loading_status"in o&&l(12,b=o.loading_status),"allow_custom_value"in o&&l(13,m=o.allow_custom_value),"mode"in o&&l(14,p=o.mode)},[i,f,s,u,r,n,a,c,g,_,v,k,b,m,p,O,E,T,h,M,H]}class Ve extends P{constructor(e){super(),Q(this,e,Ge,Fe,W,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,value_is_output:1,multiselect:7,max_choices:8,choices:9,show_label:10,style:11,loading_status:12,allow_custom_value:13,mode:14})}}const xe=Ve,$e=["static","dynamic"],el=t=>({type:{payload:"string"},description:{payload:"selected choice"},example_data:t.choices.length?t.choices[0]:""});export{xe as Component,el as document,$e as modes}; -//# sourceMappingURL=index-50e85c2b.js.map diff --git a/spaces/lambdaofgod/huggingface_explorer/app.py b/spaces/lambdaofgod/huggingface_explorer/app.py deleted file mode 100644 index ea55caffedd603f0b8b8cd9570671c426b094fc5..0000000000000000000000000000000000000000 --- a/spaces/lambdaofgod/huggingface_explorer/app.py +++ /dev/null @@ -1,76 +0,0 @@ -import pandas as pd -import streamlit as st -import math - - -class ModelFinder: - def __init__(self, models_df): - self.setup_inputs() - self.models_df = models_df - self.n_per_page = 10 - - def setup_page(self): - st.title("Huggingface model explorer") - st.text(f"search {len(models_df)} models by name or readme") - st.text( - "note that there are many more models but here we only show those with readme" - ) - - def setup_inputs(self): - col1, col2, col3, col4, col5 = st.columns(5) - self.query_input = col1.text_input("model name query", value="") - self.author_query_input = col2.text_input("author query", value="") - self.id_query_input = col3.text_input("modelId query", value="") - self.readme_query_input = col4.text_input("readme query", value="") - self.page = col5 - - def get_selected_models_df(self, query, readme_query, id_query, author_query): - return self.models_df[ - self.models_df["readme"].str.lower().str.contains(readme_query) - & self.models_df["modelId"].str.lower().str.contains(id_query) - & self.models_df["author"].str.lower().str.contains(author_query) - & self.models_df["model_name"].str.lower().str.contains(query) - ] - - def show_paged_selected_model_info(self, 
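-                                          # selected_models_df: rows already filtered
-                                          # by the name/author/modelId/readme queries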
selected_models_df): - page = self.page.number_input("page", 0, math.ceil(len(selected_models_df) / 10)) - selected_models_df_subset = selected_models_df.iloc[ - page * self.n_per_page : (page + 1) * self.n_per_page - ] - st.write(f"found {len(selected_models_df)} models") - for (model_name, tag, readme) in selected_models_df_subset[ - ["modelId", "pipeline_tag", "readme"] - ].itertuples(index=False): - model_url = f"http://huggingface.co/{model_name}" - with st.expander(f"[{model_name}]({model_url}) ({tag})"): - st.write(readme) - - def run(self): - self.setup_page() - selected_models_df = self.get_selected_models_df( - self.query_input, - self.readme_query_input, - self.id_query_input, - self.author_query_input, - ) - self.show_paged_selected_model_info(selected_models_df) - - -def prepare_models_df(path): - df = pd.read_parquet(path).dropna(subset=["readme"]) - sep_tuples = [ - tp if len(tp) == 2 else ("", tp[0]) - for tp in df["modelId"].str.split("/").to_list() - ] - authors, model_names = zip(*sep_tuples) - df["author"] = authors - df["model_name"] = model_names - return df - - -model_path = "models_with_readmes.parquet" -models_df = prepare_models_df(model_path) - -app = ModelFinder(models_df) - -app.run() diff --git a/spaces/langvision/ChatWeb/_next/static/chunks/698-f6bc8e9278737c93.js b/spaces/langvision/ChatWeb/_next/static/chunks/698-f6bc8e9278737c93.js deleted file mode 100644 index f8219f8c6d7cf299958256ed0d71b1f484a43b92..0000000000000000000000000000000000000000 --- a/spaces/langvision/ChatWeb/_next/static/chunks/698-f6bc8e9278737c93.js +++ /dev/null @@ -1,25 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[698],{93644:function(){"trimStart"in String.prototype||(String.prototype.trimStart=String.prototype.trimLeft),"trimEnd"in String.prototype||(String.prototype.trimEnd=String.prototype.trimRight),"description"in Symbol.prototype||Object.defineProperty(Symbol.prototype,"description",{configurable:!0,get:function(){var e=/\((.*)\)/.exec(this.toString());return e?e[1]:void 0}}),Array.prototype.flat||(Array.prototype.flat=function(e,t){return t=this.concat.apply([],this),e>1&&t.some(Array.isArray)?t.flat(e-1):t},Array.prototype.flatMap=function(e,t){return this.map(e,t).flat()}),Promise.prototype.finally||(Promise.prototype.finally=function(e){if("function"!=typeof e)return this.then(e,e);var t=this.constructor||Promise;return this.then(function(r){return t.resolve(e()).then(function(){return r})},function(r){return t.resolve(e()).then(function(){throw r})})}),Object.fromEntries||(Object.fromEntries=function(e){return Array.from(e).reduce(function(e,t){return e[t[0]]=t[1],e},{})})},12409:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"addBasePath",{enumerable:!0,get:function(){return o}});let n=r(60150),u=r(75588);function o(e,t){return(0,u.normalizePathTrailingSlash)((0,n.addPathPrefix)(e,""))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},30930:function(e,t){"use strict";function r(e){var t,r;t=self.__next_s,r=()=>{e()},t&&t.length?t.reduce((e,t)=>{let[r,n]=t;return e.then(()=>new Promise((e,t)=>{let u=document.createElement("script");if(n)for(let e in 
n)"children"!==e&&u.setAttribute(e,n[e]);r?(u.src=r,u.onload=()=>e(),u.onerror=t):n&&(u.innerHTML=n.children,setTimeout(e)),document.head.appendChild(u)}))},Promise.resolve()).catch(e=>{console.error(e)}).then(()=>{r()}):r()}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"appBootstrap",{enumerable:!0,get:function(){return r}}),window.next={version:"13.4.9",appDir:!0},("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},303:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"callServer",{enumerable:!0,get:function(){return u}});let n=r(2353);async function u(e,t){let r=(0,n.getServerActionDispatcher)();if(!r)throw Error("Invariant: missing action dispatcher.");return new Promise((n,u)=>{r({actionId:e,actionArgs:t,resolve:n,reject:u})})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},13426:function(e,t,r){"use strict";let n,u;Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"hydrate",{enumerable:!0,get:function(){return N}});let o=r(26927),l=r(25909);r(93644);let a=o._(r(93194)),i=l._(r(86006)),c=r(35456),s=r(27268);r(15456);let f=o._(r(59214)),d=r(303),p=r(45080),h=window.console.error;window.console.error=function(){for(var e=arguments.length,t=Array(e),r=0;r{if((0,p.isNextRouterError)(e.error)){e.preventDefault();return}});let _=e=>t=>e(t)+"",y=r.u,b={};r.u=_(e=>encodeURI(b[e]||y(e)));let v=r.k;r.k=_(v);let m=r.miniCssF;r.miniCssF=_(m),self.__next_require__=r,self.__next_chunk_load__=e=>{if(!e)return Promise.resolve();let[t,n]=e.split(":");return b[t]=n,r.e(t)};let g=document,O=()=>{let{pathname:e,search:t}=location;return e+t},P=new TextEncoder,E=!1,R=!1;function j(e){if(0===e[0])n=[];else{if(!n)throw Error("Unexpected server data: missing bootstrap script.");u?u.enqueue(P.encode(e[1])):n.push(e[1])}}let S=function(){u&&!R&&(u.close(),R=!0,n=void 0),E=!0};"loading"===document.readyState?document.addEventListener("DOMContentLoaded",S,!1):S();let T=self.__next_f=self.__next_f||[];T.forEach(j),T.push=j;let M=new Map;function w(e){let{cacheKey:t}=e;i.default.useEffect(()=>{M.delete(t)});let r=function(e){let t=M.get(e);if(t)return t;let r=new ReadableStream({start(e){n&&(n.forEach(t=>{e.enqueue(P.encode(t))}),E&&!R&&(e.close(),R=!0,n=void 0)),u=e}}),o=(0,c.createFromReadableStream)(r,{callServer:d.callServer});return M.set(e,o),o}(t),o=(0,i.use)(r);return o}let C=i.default.Fragment;function x(e){let{children:t}=e;return t}function A(e){return i.default.createElement(w,{...e,cacheKey:O()})}function N(){let e=i.default.createElement(C,null,i.default.createElement(s.HeadManagerContext.Provider,{value:{appDir:!0}},i.default.createElement(x,null,i.default.createElement(A,null)))),t={onRecoverableError:f.default},r="__next_error__"===document.documentElement.id;r?a.default.createRoot(g,t).render(e):i.default.startTransition(()=>a.default.hydrateRoot(g,e,t))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},53333:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0});let 
n=r(30930);(0,n.appBootstrap)(()=>{r(2353),r(49180);let{hydrate:e}=r(13426);e()}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},71002:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"AppRouterAnnouncer",{enumerable:!0,get:function(){return l}});let n=r(86006),u=r(8431),o="next-route-announcer";function l(e){let{tree:t}=e,[r,l]=(0,n.useState)(null);(0,n.useEffect)(()=>{let e=function(){var e;let t=document.getElementsByName(o)[0];if(null==t?void 0:null==(e=t.shadowRoot)?void 0:e.childNodes[0])return t.shadowRoot.childNodes[0];{let e=document.createElement(o);e.style.cssText="position:absolute";let t=document.createElement("div");t.ariaLive="assertive",t.id="__next-route-announcer__",t.role="alert",t.style.cssText="position:absolute;border:0;height:1px;margin:-1px;padding:0;width:1px;clip:rect(0 0 0 0);overflow:hidden;white-space:nowrap;word-wrap:normal";let r=e.attachShadow({mode:"open"});return r.appendChild(t),document.body.appendChild(e),t}}();return l(e),()=>{let e=document.getElementsByTagName(o)[0];(null==e?void 0:e.isConnected)&&document.body.removeChild(e)}},[]);let[a,i]=(0,n.useState)(""),c=(0,n.useRef)();return(0,n.useEffect)(()=>{let e="";if(document.title)e=document.title;else{let t=document.querySelector("h1");t&&(e=t.innerText||t.textContent||"")}void 0!==c.current&&i(e),c.current=e},[t]),r?(0,u.createPortal)(a,r):null}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},34852:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{RSC:function(){return r},ACTION:function(){return n},NEXT_ROUTER_STATE_TREE:function(){return u},NEXT_ROUTER_PREFETCH:function(){return o},NEXT_URL:function(){return l},FETCH_CACHE_HEADER:function(){return a},RSC_CONTENT_TYPE_HEADER:function(){return i},RSC_VARY_HEADER:function(){return c},FLIGHT_PARAMETERS:function(){return s},NEXT_RSC_UNION_QUERY:function(){return f}});let r="RSC",n="Next-Action",u="Next-Router-State-Tree",o="Next-Router-Prefetch",l="Next-Url",a="x-vercel-sc-headers",i="text/x-component",c=r+", "+u+", "+o,s=[[r],[u],[o]],f="_rsc";("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},2353:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{getServerActionDispatcher:function(){return E},urlToUrlWithoutFlightMarker:function(){return R},default:function(){return w}});let n=r(25909),u=n._(r(86006)),o=r(15456),l=r(85426),a=r(74741),i=r(8744),c=r(76173),s=r(18688),f=r(47330),d=r(89343),p=r(30753),h=r(12409),_=r(71002),y=r(22418),b=r(62484),v=r(68792),m=r(75238),g=r(34852),O=new Map,P=null;function E(){return P}function R(e){let t=new URL(e,location.origin);return t.searchParams.delete(g.NEXT_RSC_UNION_QUERY),t.pathname.endsWith("/index.txt")?t.pathname=t.pathname.slice(0,-10):t.pathname=t.pathname.slice(0,-4),t}function j(e){return e.origin!==window.location.origin}function 
S(e){let{tree:t,pushRef:r,canonicalUrl:n,sync:o}=e;return(0,u.useInsertionEffect)(()=>{let e={__NA:!0,tree:t};r.pendingPush&&(0,i.createHrefFromUrl)(new URL(window.location.href))!==n?(r.pendingPush=!1,window.history.pushState(e,"",n)):window.history.replaceState(e,"",n),o()},[t,r,n,o]),null}let T=()=>({status:o.CacheStates.LAZY_INITIALIZED,data:null,subTreeData:null,parallelRoutes:new Map});function M(e){let{buildId:t,initialHead:r,initialTree:n,initialCanonicalUrl:i,children:f,assetPrefix:g,notFound:E,notFoundStyles:R,asNotFound:M}=e,w=(0,u.useMemo)(()=>(0,d.createInitialRouterState)({buildId:t,children:f,initialCanonicalUrl:i,initialTree:n,initialParallelRoutes:O,isServer:!1,location:window.location,initialHead:r}),[t,f,i,n,r]),[{tree:C,cache:x,prefetchCache:A,pushRef:N,focusAndScrollRef:I,canonicalUrl:D,nextUrl:k},F,U]=(0,s.useReducerWithReduxDevtools)(l.reducer,w);(0,u.useEffect)(()=>{O=null},[]);let{searchParams:L,pathname:H}=(0,u.useMemo)(()=>{let e=new URL(D,window.location.href);return{searchParams:e.searchParams,pathname:e.pathname}},[D]),$=(0,u.useCallback)((e,t,r)=>{(0,u.startTransition)(()=>{F({type:a.ACTION_SERVER_PATCH,flightData:t,previousTree:e,overrideCanonicalUrl:r,cache:T(),mutable:{}})})},[F]),W=(0,u.useCallback)((e,t,r,n)=>{let u=new URL((0,h.addBasePath)(e),location.href);return F({type:a.ACTION_NAVIGATE,url:u,isExternalUrl:j(u),locationSearch:location.search,forceOptimisticNavigation:r,shouldScroll:null==n||n,navigateType:t,cache:T(),mutable:{}})},[F]);!function(e,t,r){let n=(0,u.useCallback)(n=>{(0,u.startTransition)(()=>{t({...n,type:a.ACTION_SERVER_ACTION,mutable:{},navigate:r,changeByServerResponse:e})})},[e,t,r]);P=n}($,F,W);let B=(0,u.useMemo)(()=>{let e={back:()=>window.history.back(),forward:()=>window.history.forward(),prefetch:(e,t)=>{if((0,p.isBot)(window.navigator.userAgent))return;let r=new URL((0,h.addBasePath)(e),location.href);j(r)||(0,u.startTransition)(()=>{var e;F({type:a.ACTION_PREFETCH,url:r,kind:null!=(e=null==t?void 0:t.kind)?e:a.PrefetchKind.FULL})})},replace:(e,t)=>{void 0===t&&(t={}),(0,u.startTransition)(()=>{var r;W(e,"replace",!!t.forceOptimisticNavigation,null==(r=t.scroll)||r)})},push:(e,t)=>{void 0===t&&(t={}),(0,u.startTransition)(()=>{var r;W(e,"push",!!t.forceOptimisticNavigation,null==(r=t.scroll)||r)})},refresh:()=>{(0,u.startTransition)(()=>{F({type:a.ACTION_REFRESH,cache:T(),mutable:{},origin:window.location.origin})})},fastRefresh:()=>{throw Error("fastRefresh can only be used in development mode. 
Please use refresh instead.")}};return e},[F,W]);if((0,u.useEffect)(()=>{window.next&&(window.next.router=B)},[B]),N.mpaNavigation){let e=window.location;N.pendingPush?e.assign(D):e.replace(D),(0,u.use)((0,m.createInfinitePromise)())}let Y=(0,u.useCallback)(e=>{let{state:t}=e;if(t){if(!t.__NA){window.location.reload();return}(0,u.startTransition)(()=>{F({type:a.ACTION_RESTORE,url:new URL(window.location.href),tree:t.tree})})}},[F]);(0,u.useEffect)(()=>(window.addEventListener("popstate",Y),()=>{window.removeEventListener("popstate",Y)}),[Y]);let V=(0,u.useMemo)(()=>(0,v.findHeadInCache)(x,C[1]),[x,C]),G=u.default.createElement(y.RedirectBoundary,null,V,x.subTreeData,u.default.createElement(_.AppRouterAnnouncer,{tree:C}));return u.default.createElement(u.default.Fragment,null,u.default.createElement(S,{tree:C,pushRef:N,canonicalUrl:D,sync:U}),u.default.createElement(c.PathnameContext.Provider,{value:H},u.default.createElement(c.SearchParamsContext.Provider,{value:L},u.default.createElement(o.GlobalLayoutRouterContext.Provider,{value:{buildId:t,changeByServerResponse:$,tree:C,focusAndScrollRef:I,nextUrl:k}},u.default.createElement(o.AppRouterContext.Provider,{value:B},u.default.createElement(o.LayoutRouterContext.Provider,{value:{childNodes:x.parallelRoutes,tree:C,url:D}},u.default.createElement(b.NotFoundBoundary,{notFound:E,notFoundStyles:R,asNotFound:M},G)))))))}function w(e){let{globalErrorComponent:t,...r}=e;return u.default.createElement(f.ErrorBoundary,{errorComponent:t},u.default.createElement(M,r))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},90259:function(e,t,r){"use strict";function n(e){}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"clientHookInServerComponentError",{enumerable:!0,get:function(){return n}}),r(26927),r(86006),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},47330:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{ErrorBoundaryHandler:function(){return a},default:function(){return i},ErrorBoundary:function(){return c}});let n=r(26927),u=n._(r(86006)),o=r(4e3),l={error:{fontFamily:'system-ui,"Segoe UI",Roboto,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji"',height:"100vh",textAlign:"center",display:"flex",flexDirection:"column",alignItems:"center",justifyContent:"center"},text:{fontSize:"14px",fontWeight:400,lineHeight:"28px",margin:"0 8px"}};class a extends u.default.Component{static getDerivedStateFromError(e){return{error:e}}static getDerivedStateFromProps(e,t){return e.pathname!==t.previousPathname&&t.error?{error:null,previousPathname:e.pathname}:{error:t.error,previousPathname:e.pathname}}render(){return this.state.error?u.default.createElement(u.default.Fragment,null,this.props.errorStyles,u.default.createElement(this.props.errorComponent,{error:this.state.error,reset:this.reset})):this.props.children}constructor(e){super(e),this.reset=()=>{this.setState({error:null})},this.state={error:null,previousPathname:this.props.pathname}}}function i(e){let{error:t}=e,r=null==t?void 0:t.digest;return 
u.default.createElement("html",null,u.default.createElement("head",null),u.default.createElement("body",null,u.default.createElement("div",{style:l.error},u.default.createElement("div",null,u.default.createElement("h2",{style:l.text},"Application error: a "+(r?"server":"client")+"-side exception has occurred (see the "+(r?"server logs":"browser console")+" for more information)."),r?u.default.createElement("p",{style:l.text},"Digest: "+r):null))))}function c(e){let{errorComponent:t,errorStyles:r,children:n}=e,l=(0,o.usePathname)();return t?u.default.createElement(a,{pathname:l,errorComponent:t,errorStyles:r},n):u.default.createElement(u.default.Fragment,null,n)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},47308:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{DYNAMIC_ERROR_CODE:function(){return r},DynamicServerError:function(){return n}});let r="DYNAMIC_SERVER_USAGE";class n extends Error{constructor(e){super("Dynamic server usage: "+e),this.digest=r}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},75238:function(e,t){"use strict";let r;function n(){return r||(r=new Promise(()=>{})),r}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createInfinitePromise",{enumerable:!0,get:function(){return n}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},45080:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isNextRouterError",{enumerable:!0,get:function(){return o}});let n=r(62951),u=r(14024);function o(e){return e&&e.digest&&((0,u.isRedirectError)(e)||(0,n.isNotFoundError)(e))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},49180:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return E}});let n=r(26927),u=r(25909),o=u._(r(86006)),l=n._(r(8431)),a=r(15456),i=r(52368),c=r(75238),s=r(47330),f=r(50655),d=r(92998),p=r(22418),h=r(62484),_=r(65143),y=r(49101),b=["bottom","height","left","right","top","width","x","y"];function v(e,t){let r=e.getBoundingClientRect();return r.top>=0&&r.top<=t}class m extends o.default.Component{componentDidMount(){this.handlePotentialScroll()}componentDidUpdate(){this.props.focusAndScrollRef.apply&&this.handlePotentialScroll()}render(){return this.props.children}constructor(...e){super(...e),this.handlePotentialScroll=()=>{let{focusAndScrollRef:e,segmentPath:t}=this.props;if(e.apply){var r;if(0!==e.segmentPaths.length&&!e.segmentPaths.some(e=>t.every((t,r)=>(0,f.matchSegment)(t,e[r]))))return;let n=null,u=e.hashFragment;if(u&&(n="top"===u?document.body:null!=(r=document.getElementById(u))?r:document.getElementsByName(u)[0]),n||(n=l.default.findDOMNode(this)),!(n instanceof Element))return;for(;!(n instanceof 
HTMLElement)||function(e){let t=e.getBoundingClientRect();return b.every(e=>0===t[e])}(n);){if(null===n.nextElementSibling)return;n=n.nextElementSibling}e.apply=!1,e.hashFragment=null,e.segmentPaths=[],(0,d.handleSmoothScroll)(()=>{if(u){n.scrollIntoView();return}let e=document.documentElement,t=e.clientHeight;!v(n,t)&&(e.scrollTop=0,v(n,t)||n.scrollIntoView())},{dontForceLayout:!0}),n.focus()}}}}function g(e){let{segmentPath:t,children:r}=e,n=(0,o.useContext)(a.GlobalLayoutRouterContext);if(!n)throw Error("invariant global layout router not mounted");return o.default.createElement(m,{segmentPath:t,focusAndScrollRef:n.focusAndScrollRef},r)}function O(e){let{parallelRouterKey:t,url:r,childNodes:n,childProp:u,segmentPath:l,tree:s,cacheKey:d}=e,p=(0,o.useContext)(a.GlobalLayoutRouterContext);if(!p)throw Error("invariant global layout router not mounted");let{buildId:h,changeByServerResponse:_,tree:y}=p,b=n.get(d);if(u&&null!==u.current&&(b?b.status===a.CacheStates.LAZY_INITIALIZED&&(b.status=a.CacheStates.READY,b.subTreeData=u.current):(b={status:a.CacheStates.READY,data:null,subTreeData:u.current,parallelRoutes:new Map},n.set(d,b))),!b||b.status===a.CacheStates.LAZY_INITIALIZED){let e=function e(t,r){if(t){let[n,u]=t,o=2===t.length;if((0,f.matchSegment)(r[0],n)&&r[1].hasOwnProperty(u)){if(o){let t=e(void 0,r[1][u]);return[r[0],{...r[1],[u]:[t[0],t[1],t[2],"refetch"]}]}return[r[0],{...r[1],[u]:e(t.slice(2),r[1][u])}]}}return r}(["",...l],y);b={status:a.CacheStates.DATA_FETCH,data:(0,i.fetchServerResponse)(new URL(r,location.origin),e,p.nextUrl,h),subTreeData:null,head:b&&b.status===a.CacheStates.LAZY_INITIALIZED?b.head:void 0,parallelRoutes:b&&b.status===a.CacheStates.LAZY_INITIALIZED?b.parallelRoutes:new Map},n.set(d,b)}if(!b)throw Error("Child node should always exist");if(b.subTreeData&&b.data)throw Error("Child node should not have both subTreeData and data");if(b.data){let[e,t]=(0,o.use)(b.data);b.data=null,setTimeout(()=>{(0,o.startTransition)(()=>{_(y,e,t)})}),(0,o.use)((0,c.createInfinitePromise)())}b.subTreeData||(0,o.use)((0,c.createInfinitePromise)());let v=o.default.createElement(a.LayoutRouterContext.Provider,{value:{tree:s[1][t],childNodes:b.parallelRoutes,url:r}},b.subTreeData);return v}function P(e){let{children:t,loading:r,loadingStyles:n,hasLoading:u}=e;return u?o.default.createElement(o.Suspense,{fallback:o.default.createElement(o.default.Fragment,null,n,r)},t):o.default.createElement(o.default.Fragment,null,t)}function E(e){let{parallelRouterKey:t,segmentPath:r,childProp:n,error:u,errorStyles:l,templateStyles:i,loading:c,loadingStyles:d,hasLoading:b,template:v,notFound:m,notFoundStyles:E,asNotFound:R,styles:j}=e,S=(0,o.useContext)(a.LayoutRouterContext);if(!S)throw Error("invariant expected layout router to be mounted");let{childNodes:T,tree:M,url:w}=S,C=T.get(t);C||(C=new Map,T.set(t,C));let x=M[1][t][0],A=n.segment,N=(0,_.getSegmentValue)(x),I=[x];return o.default.createElement(o.default.Fragment,null,j,I.map(e=>{let j=(0,f.matchSegment)(e,A),S=(0,_.getSegmentValue)(e),T=(0,y.createRouterCacheKey)(e);return 
o.default.createElement(a.TemplateContext.Provider,{key:(0,y.createRouterCacheKey)(e,!0),value:o.default.createElement(g,{segmentPath:r},o.default.createElement(s.ErrorBoundary,{errorComponent:u,errorStyles:l},o.default.createElement(P,{hasLoading:b,loading:c,loadingStyles:d},o.default.createElement(h.NotFoundBoundary,{notFound:m,notFoundStyles:E,asNotFound:R},o.default.createElement(p.RedirectBoundary,null,o.default.createElement(O,{parallelRouterKey:t,url:w,tree:M,childNodes:C,childProp:j?n:null,segmentPath:r,cacheKey:T,isActive:N===S}))))))},i,v)}))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},50655:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{matchSegment:function(){return u},canSegmentBeOverridden:function(){return o}});let n=r(24778),u=(e,t)=>"string"==typeof e?"string"==typeof t&&e===t:"string"!=typeof t&&e[0]===t[0]&&e[1]===t[1],o=(e,t)=>{var r;return!Array.isArray(e)&&!!Array.isArray(t)&&(null==(r=(0,n.getSegmentParam)(e))?void 0:r.param)===t[0]};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4e3:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{ReadonlyURLSearchParams:function(){return p},useSearchParams:function(){return h},usePathname:function(){return _},ServerInsertedHTMLContext:function(){return i.ServerInsertedHTMLContext},useServerInsertedHTML:function(){return i.useServerInsertedHTML},useRouter:function(){return y},useParams:function(){return b},useSelectedLayoutSegments:function(){return v},useSelectedLayoutSegment:function(){return m},redirect:function(){return c.redirect},notFound:function(){return s.notFound}});let n=r(86006),u=r(15456),o=r(76173),l=r(90259),a=r(65143),i=r(73476),c=r(14024),s=r(62951),f=Symbol("internal for urlsearchparams readonly");function d(){return Error("ReadonlyURLSearchParams cannot be modified")}class p{[Symbol.iterator](){return this[f][Symbol.iterator]()}append(){throw d()}delete(){throw d()}set(){throw d()}sort(){throw d()}constructor(e){this[f]=e,this.entries=e.entries.bind(e),this.forEach=e.forEach.bind(e),this.get=e.get.bind(e),this.getAll=e.getAll.bind(e),this.has=e.has.bind(e),this.keys=e.keys.bind(e),this.values=e.values.bind(e),this.toString=e.toString.bind(e)}}function h(){(0,l.clientHookInServerComponentError)("useSearchParams");let e=(0,n.useContext)(o.SearchParamsContext),t=(0,n.useMemo)(()=>e?new p(e):null,[e]);return t}function _(){return(0,l.clientHookInServerComponentError)("usePathname"),(0,n.useContext)(o.PathnameContext)}function y(){(0,l.clientHookInServerComponentError)("useRouter");let e=(0,n.useContext)(u.AppRouterContext);if(null===e)throw Error("invariant expected app router to be mounted");return e}function b(){(0,l.clientHookInServerComponentError)("useParams");let e=(0,n.useContext)(u.GlobalLayoutRouterContext);return e?function e(t,r){void 0===r&&(r={});let n=t[1];for(let t of Object.values(n)){let n=t[0],u=Array.isArray(n),o=u?n[1]:n;!o||o.startsWith("__PAGE__")||(u&&(r[n[0]]=n[1]),r=e(t,r))}return r}(e.tree):null}function v(e){void 
0===e&&(e="children"),(0,l.clientHookInServerComponentError)("useSelectedLayoutSegments");let{tree:t}=(0,n.useContext)(u.LayoutRouterContext);return function e(t,r,n,u){let o;if(void 0===n&&(n=!0),void 0===u&&(u=[]),n)o=t[1][r];else{var l;let e=t[1];o=null!=(l=e.children)?l:Object.values(e)[0]}if(!o)return u;let i=o[0],c=(0,a.getSegmentValue)(i);return!c||c.startsWith("__PAGE__")?u:(u.push(c),e(o,r,!1,u))}(t,e)}function m(e){void 0===e&&(e="children"),(0,l.clientHookInServerComponentError)("useSelectedLayoutSegment");let t=v(e);return 0===t.length?null:t[0]}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},62484:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"NotFoundBoundary",{enumerable:!0,get:function(){return a}});let n=r(26927),u=n._(r(86006)),o=r(4e3);class l extends u.default.Component{static getDerivedStateFromError(e){if((null==e?void 0:e.digest)==="NEXT_NOT_FOUND")return{notFoundTriggered:!0};throw e}static getDerivedStateFromProps(e,t){return e.pathname!==t.previousPathname&&t.notFoundTriggered?{notFoundTriggered:!1,previousPathname:e.pathname}:{notFoundTriggered:t.notFoundTriggered,previousPathname:e.pathname}}render(){return this.state.notFoundTriggered?u.default.createElement(u.default.Fragment,null,u.default.createElement("meta",{name:"robots",content:"noindex"}),this.props.notFoundStyles,this.props.notFound):this.props.children}constructor(e){super(e),this.state={notFoundTriggered:!!e.asNotFound,previousPathname:e.pathname}}}function a(e){let{notFound:t,notFoundStyles:r,asNotFound:n,children:a}=e,i=(0,o.usePathname)();return t?u.default.createElement(l,{pathname:i,notFound:t,notFoundStyles:r,asNotFound:n},a):u.default.createElement(u.default.Fragment,null,a)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},62951:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{notFound:function(){return n},isNotFoundError:function(){return u}});let r="NEXT_NOT_FOUND";function n(){let e=Error(r);throw e.digest=r,e}function u(e){return(null==e?void 0:e.digest)===r}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},22418:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{RedirectErrorBoundary:function(){return i},RedirectBoundary:function(){return c}});let n=r(25909),u=n._(r(86006)),o=r(4e3),l=r(14024);function a(e){let{redirect:t,reset:r,redirectType:n}=e,a=(0,o.useRouter)();return(0,u.useEffect)(()=>{u.default.startTransition(()=>{n===l.RedirectType.push?a.push(t,{}):a.replace(t,{}),r()})},[t,n,r,a]),null}class i extends u.default.Component{static getDerivedStateFromError(e){if((0,l.isRedirectError)(e)){let t=(0,l.getURLFromRedirectError)(e),r=(0,l.getRedirectTypeFromError)(e);return{redirect:t,redirectType:r}}throw e}render(){let{redirect:e,redirectType:t}=this.state;return 
null!==e&&null!==t?u.default.createElement(a,{redirect:e,redirectType:t,reset:()=>this.setState({redirect:null})}):this.props.children}constructor(e){super(e),this.state={redirect:null,redirectType:null}}}function c(e){let{children:t}=e,r=(0,o.useRouter)();return u.default.createElement(i,{router:r},t)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},14024:function(e,t,r){"use strict";var n,u;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{RedirectType:function(){return n},getRedirectError:function(){return a},redirect:function(){return i},isRedirectError:function(){return c},getURLFromRedirectError:function(){return s},getRedirectTypeFromError:function(){return f}});let o=r(24437),l="NEXT_REDIRECT";function a(e,t){let r=Error(l);r.digest=l+";"+t+";"+e;let n=o.requestAsyncStorage.getStore();return n&&(r.mutableCookies=n.mutableCookies),r}function i(e,t){throw void 0===t&&(t="replace"),a(e,t)}function c(e){if("string"!=typeof(null==e?void 0:e.digest))return!1;let[t,r,n]=e.digest.split(";",3);return t===l&&("replace"===r||"push"===r)&&"string"==typeof n}function s(e){return c(e)?e.digest.split(";",3)[2]:null}function f(e){if(!c(e))throw Error("Not a redirect error");return e.digest.split(";",3)[1]}(u=n||(n={})).push="push",u.replace="replace",("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},92306:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return l}});let n=r(25909),u=n._(r(86006)),o=r(15456);function l(){let e=(0,u.useContext)(o.TemplateContext);return u.default.createElement(u.default.Fragment,null,e)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},68654:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"applyFlightData",{enumerable:!0,get:function(){return l}});let n=r(15456),u=r(90743),o=r(23033);function l(e,t,r,l){void 0===l&&(l=!1);let[a,i,c]=r.slice(-3);return null!==i&&(3===r.length?(t.status=n.CacheStates.READY,t.subTreeData=i,(0,u.fillLazyItemsTillLeafWithHead)(t,e,a,c,l)):(t.status=n.CacheStates.READY,t.subTreeData=e.subTreeData,t.parallelRoutes=new Map(e.parallelRoutes),(0,o.fillCacheWithNewSubTreeData)(t,e,r,l)),!0)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},76031:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"applyRouterStatePatchToTree",{enumerable:!0,get:function(){return function e(t,r,o){let l;let[a,i,,,c]=r;if(1===t.length){let e=u(r,o);return e}let[s,f]=t;if(!(0,n.matchSegment)(s,a))return null;let d=2===t.length;if(d)l=u(i[f],o);else if(null===(l=e(t.slice(2),i[f],o)))return null;let p=[t[0],{...i,[f]:l}];return c&&(p[4]=!0),p}}});let n=r(50655);function 
u(e,t){let[r,o]=e,[l,a]=t;if("__DEFAULT__"===l&&"__DEFAULT__"!==r)return e;if((0,n.matchSegment)(r,l)){let t={};for(let e in o){let r=void 0!==a[e];r?t[e]=u(o[e],a[e]):t[e]=o[e]}for(let e in a)t[e]||(t[e]=a[e]);let n=[r,t];return e[2]&&(n[2]=e[2]),e[3]&&(n[3]=e[3]),e[4]&&(n[4]=e[4]),n}return t}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},41781:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{extractPathFromFlightRouterState:function(){return a},computeChangedPath:function(){return i}});let n=r(47399),u=r(50655),o=e=>"string"==typeof e?e:e[1];function l(e){return e.split("/").reduce((e,t)=>""===t||t.startsWith("(")&&t.endsWith(")")?e:e+"/"+t,"")||"/"}function a(e){var t;let r=Array.isArray(e[0])?e[0][1]:e[0];if("__DEFAULT__"===r||n.INTERCEPTION_ROUTE_MARKERS.some(e=>r.startsWith(e)))return;if(r.startsWith("__PAGE__"))return"";let u=[r],o=null!=(t=e[1])?t:{},i=o.children?a(o.children):void 0;if(void 0!==i)u.push(i);else for(let[e,t]of Object.entries(o)){if("children"===e)continue;let r=a(t);void 0!==r&&u.push(r)}return l(u.join("/"))}function i(e,t){let r=function e(t,r){let[l,i]=t,[c,s]=r,f=o(l),d=o(c);if(n.INTERCEPTION_ROUTE_MARKERS.some(e=>f.startsWith(e)||d.startsWith(e)))return"";if(!(0,u.matchSegment)(l,c)){var p;return null!=(p=a(r))?p:""}for(let t in i)if(s[t]){let r=e(i[t],s[t]);if(null!==r)return o(c)+"/"+r}return null}(e,t);return null==r||"/"===r?r:l(r)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},8744:function(e,t){"use strict";function r(e,t){return void 0===t&&(t=!0),e.pathname+e.search+(t?e.hash:"")}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createHrefFromUrl",{enumerable:!0,get:function(){return r}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},89343:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createInitialRouterState",{enumerable:!0,get:function(){return a}});let n=r(15456),u=r(8744),o=r(90743),l=r(41781);function a(e){var t;let{buildId:r,initialTree:a,children:i,initialCanonicalUrl:c,initialParallelRoutes:s,isServer:f,location:d,initialHead:p}=e,h={status:n.CacheStates.READY,data:null,subTreeData:i,parallelRoutes:f?new Map:s};return(null===s||0===s.size)&&(0,o.fillLazyItemsTillLeafWithHead)(h,void 0,a,p),{buildId:r,tree:a,cache:h,prefetchCache:new Map,pushRef:{pendingPush:!1,mpaNavigation:!1},focusAndScrollRef:{apply:!1,hashFragment:null,segmentPaths:[]},canonicalUrl:d?(0,u.createHrefFromUrl)(d):c,nextUrl:null!=(t=(0,l.extractPathFromFlightRouterState)(a)||(null==d?void 0:d.pathname))?t:null}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},76486:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createOptimisticTree",{enumerable:!0,get:function(){return 
function e(t,r,u){let o;let[l,a,i,c,s]=r||[null,{}],f=t[0],d=1===t.length,p=null!==l&&(0,n.matchSegment)(l,f),h=Object.keys(a).length>1,_=!r||!p||h,y={};if(null!==l&&p&&(y=a),!d&&!h){let r=e(t.slice(1),y?y.children:null,u||_);o=r}let b=[f,{...y,...o?{children:o}:{}}];return i&&(b[2]=i),!u&&_?b[3]="refetch":p&&c&&(b[3]=c),p&&s&&(b[4]=s),b}}});let n=r(50655);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7718:function(e,t){"use strict";function r(e){return e.status="pending",e.then(t=>{"pending"===e.status&&(e.status="fulfilled",e.value=t)},t=>{"pending"===e.status&&(e.status="rejected",e.value=t)}),e}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createRecordFromThenable",{enumerable:!0,get:function(){return r}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},49101:function(e,t){"use strict";function r(e,t){return void 0===t&&(t=!1),Array.isArray(e)?e[0]+"|"+e[1]+"|"+e[2]:t&&e.startsWith("__PAGE__")?"__PAGE__":e}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createRouterCacheKey",{enumerable:!0,get:function(){return r}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},52368:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fetchServerResponse",{enumerable:!0,get:function(){return s}});let n=r(35456),u=r(34852),o=r(2353),l=r(303),a=r(74741),i=r(77279);function c(e){return[(0,o.urlToUrlWithoutFlightMarker)(e).toString(),void 0]}async function s(e,t,r,s,f){let d={[u.RSC]:"1",[u.NEXT_ROUTER_STATE_TREE]:encodeURIComponent(JSON.stringify(t))};f===a.PrefetchKind.AUTO&&(d[u.NEXT_ROUTER_PREFETCH]="1"),r&&(d[u.NEXT_URL]=r);let p=(0,i.hexHash)([d[u.NEXT_ROUTER_PREFETCH]||"0",d[u.NEXT_ROUTER_STATE_TREE]].join(","));try{let t=new URL(e);t.pathname.endsWith("/")?t.pathname+="index.txt":t.pathname+=".txt",t.searchParams.set(u.NEXT_RSC_UNION_QUERY,p);let r=await fetch(t,{credentials:"same-origin",headers:d}),a=(0,o.urlToUrlWithoutFlightMarker)(r.url),i=r.redirected?a:void 0,f=r.headers.get("content-type")||"",h=f===u.RSC_CONTENT_TYPE_HEADER;if(h||(h=f.startsWith("text/plain")),!h||!r.ok)return c(a.toString());let[_,y]=await (0,n.createFromFetch)(Promise.resolve(r),{callServer:l.callServer});if(s!==_)return c(r.url);return[y,i]}catch(t){return console.error("Failed to fetch RSC payload. 
Falling back to browser navigation.",t),[e.toString(),void 0]}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},70155:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fillCacheWithDataProperty",{enumerable:!0,get:function(){return function e(t,r,o,l,a){void 0===a&&(a=!1);let i=o.length<=2,[c,s]=o,f=(0,u.createRouterCacheKey)(s),d=r.parallelRoutes.get(c);if(!d||a&&r.parallelRoutes.size>1)return{bailOptimistic:!0};let p=t.parallelRoutes.get(c);p&&p!==d||(p=new Map(d),t.parallelRoutes.set(c,p));let h=d.get(f),_=p.get(f);if(i){_&&_.data&&_!==h||p.set(f,{status:n.CacheStates.DATA_FETCH,data:l(),subTreeData:null,parallelRoutes:new Map});return}if(!_||!h){_||p.set(f,{status:n.CacheStates.DATA_FETCH,data:l(),subTreeData:null,parallelRoutes:new Map});return}return _===h&&(_={status:_.status,data:_.data,subTreeData:_.subTreeData,parallelRoutes:new Map(_.parallelRoutes)},p.set(f,_)),e(_,h,o.slice(2),l)}}});let n=r(15456),u=r(49101);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},23033:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fillCacheWithNewSubTreeData",{enumerable:!0,get:function(){return function e(t,r,a,i){let c=a.length<=5,[s,f]=a,d=(0,l.createRouterCacheKey)(f),p=r.parallelRoutes.get(s);if(!p)return;let h=t.parallelRoutes.get(s);h&&h!==p||(h=new Map(p),t.parallelRoutes.set(s,h));let _=p.get(d),y=h.get(d);if(c){y&&y.data&&y!==_||(y={status:n.CacheStates.READY,data:null,subTreeData:a[3],parallelRoutes:_?new Map(_.parallelRoutes):new Map},_&&(0,u.invalidateCacheByRouterState)(y,_,a[2]),(0,o.fillLazyItemsTillLeafWithHead)(y,_,a[2],a[4],i),h.set(d,y));return}y&&_&&(y===_&&(y={status:y.status,data:y.data,subTreeData:y.subTreeData,parallelRoutes:new Map(y.parallelRoutes)},h.set(d,y)),e(y,_,a.slice(2),i))}}});let n=r(15456),u=r(18179),o=r(90743),l=r(49101);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},90743:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fillLazyItemsTillLeafWithHead",{enumerable:!0,get:function(){return function e(t,r,o,l,a){let i=0===Object.keys(o[1]).length;if(i){t.head=l;return}for(let i in o[1]){let c=o[1][i],s=c[0],f=(0,u.createRouterCacheKey)(s);if(r){let u=r.parallelRoutes.get(i);if(u){let r=new Map(u),o=r.get(f),s=a&&o?{status:o.status,data:o.data,subTreeData:o.subTreeData,parallelRoutes:new Map(o.parallelRoutes)}:{status:n.CacheStates.LAZY_INITIALIZED,data:null,subTreeData:null,parallelRoutes:new Map(null==o?void 0:o.parallelRoutes)};r.set(f,s),e(s,o,c,l,a),t.parallelRoutes.set(i,r);continue}}let d={status:n.CacheStates.LAZY_INITIALIZED,data:null,subTreeData:null,parallelRoutes:new Map},p=t.parallelRoutes.get(i);p?p.set(f,d):t.parallelRoutes.set(i,new Map([[f,d]])),e(d,void 0,c,l,a)}}}});let n=r(15456),u=r(49101);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},29231:function(e,t){"use strict";var r,n;function u(e){let{kind:t,prefetchTime:r,lastUsedTime:n}=e;return Date.now()<(null!=n?n:r)+3e4?n?"reusable":"fresh":"auto"===t&&Date.now()["children",e]).flat(),p=(0,c.fillCacheWithDataProperty)(f,e.cache,d,()=>(t||(t=(0,o.createRecordFromThenable)((0,u.fetchServerResponse)(r,i,e.nextUrl,e.buildId))),t),!0);if(!(null==p?void 0:p.bailOptimistic))return R.previousTree=e.tree,R.patchedTree=i,R.pendingPush=C,R.hashFragment=M,R.shouldScroll=S,R.scrollableSegments=[],R.cache=f,R.canonicalUrl=w,e.prefetchCache.set((0,a.createHrefFromUrl)(r,!1),{data:Promise.resolve(t),kind:h.PrefetchKind.TEMPORARY,prefetchTime:Date.now(),treeAtTimeOfPrefetch:e.tree,lastUsedTime:Date.now()}),(0,_.handleMutable)(e,R)}if(!A){let t=(0,o.createRecordFromThenable)((0,u.fetchServerResponse)(r,e.tree,e.nextUrl,e.buildId,void 0)),n={data:Promise.resolve(t),kind:h.PrefetchKind.TEMPORARY,prefetchTime:Date.now(),treeAtTimeOfPrefetch:e.tree,lastUsedTime:null};e.prefetchCache.set((0,a.createHrefFromUrl)(r,!1),n),A=n}let N=(0,b.getPrefetchEntryCacheStatus)(A),{treeAtTimeOfPrefetch:I,data:D}=A,[k,F]=(0,l.readRecordValue)(D);if(A.lastUsedTime=Date.now(),"string"==typeof k)return m(e,R,k,C);let U=e.tree,L=e.cache,H=[];for(let t of k){let o=t.slice(0,-4),l=t.slice(-3)[0],a=["",...o],s=(0,f.applyRouterStatePatchToTree)(a,U,l);if(null===s&&(s=(0,f.applyRouterStatePatchToTree)(a,I,l)),null!==s){if((0,p.isNavigatingToNewRootLayout)(U,s))return m(e,R,w,C);let f=(0,y.applyFlightData)(L,E,t,"auto"===A.kind&&N===b.PrefetchCacheEntryStatus.reusable);f||N!==b.PrefetchCacheEntryStatus.stale||(f=function(e,t,r,u,o){let l=!1;e.status=n.CacheStates.READY,e.subTreeData=t.subTreeData,e.parallelRoutes=new Map(t.parallelRoutes);let a=g(u).map(e=>[...r,...e]);for(let r of a){let n=(0,c.fillCacheWithDataProperty)(e,t,r,o);(null==n?void 0:n.bailOptimistic)||(l=!0)}return l}(E,L,o,l,()=>(0,u.fetchServerResponse)(r,U,e.nextUrl,e.buildId)));let h=(0,d.shouldHardNavigate)(a,U);for(let e of(h?(E.status=n.CacheStates.READY,E.subTreeData=L.subTreeData,(0,i.invalidateCacheBelowFlightSegmentPath)(E,L,o),R.cache=E):f&&(R.cache=E),L=E,U=s,g(l))){let t=[...o,...e];"__DEFAULT__"!==t[t.length-1]&&H.push(t)}}}return R.previousTree=e.tree,R.patchedTree=U,R.canonicalUrl=F?(0,a.createHrefFromUrl)(F):w,R.pendingPush=C,R.scrollableSegments=H,R.hashFragment=M,R.shouldScroll=S,(0,_.handleMutable)(e,R)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},72763:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"prefetchReducer",{enumerable:!0,get:function(){return c}});let n=r(8744),u=r(52368),o=r(74741),l=r(7718),a=r(62268),i=r(34852);function c(e,t){(0,a.prunePrefetchCache)(e.prefetchCache);let{url:r}=t;r.searchParams.delete(i.NEXT_RSC_UNION_QUERY);let c=(0,n.createHrefFromUrl)(r,!1),s=e.prefetchCache.get(c);if(s&&(s.kind===o.PrefetchKind.TEMPORARY&&e.prefetchCache.set(c,{...s,kind:t.kind}),!(s.kind===o.PrefetchKind.AUTO&&t.kind===o.PrefetchKind.FULL)))return e;let f=(0,l.createRecordFromThenable)((0,u.fetchServerResponse)(r,e.tree,e.nextUrl,e.buildId,t.kind));return 
e.prefetchCache.set(c,{treeAtTimeOfPrefetch:e.tree,data:f,kind:t.kind,prefetchTime:Date.now(),lastUsedTime:null}),e}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},62268:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"prunePrefetchCache",{enumerable:!0,get:function(){return u}});let n=r(29231);function u(e){for(let[t,r]of e)(0,n.getPrefetchEntryCacheStatus)(r)===n.PrefetchCacheEntryStatus.expired&&e.delete(t)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},49901:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"refreshReducer",{enumerable:!0,get:function(){return p}});let n=r(52368),u=r(7718),o=r(90168),l=r(8744),a=r(76031),i=r(58999),c=r(86664),s=r(14129),f=r(15456),d=r(90743);function p(e,t){let{cache:r,mutable:p,origin:h}=t,_=e.canonicalUrl,y=e.tree,b=JSON.stringify(p.previousTree)===JSON.stringify(y);if(b)return(0,s.handleMutable)(e,p);r.data||(r.data=(0,u.createRecordFromThenable)((0,n.fetchServerResponse)(new URL(_,h),[y[0],y[1],y[2],"refetch"],e.nextUrl,e.buildId)));let[v,m]=(0,o.readRecordValue)(r.data);if("string"==typeof v)return(0,c.handleExternalUrl)(e,p,v,e.pushRef.pendingPush);for(let t of(r.data=null,v)){if(3!==t.length)return console.log("REFRESH FAILED"),e;let[n]=t,u=(0,a.applyRouterStatePatchToTree)([""],y,n);if(null===u)throw Error("SEGMENT MISMATCH");if((0,i.isNavigatingToNewRootLayout)(y,u))return(0,c.handleExternalUrl)(e,p,_,e.pushRef.pendingPush);let o=m?(0,l.createHrefFromUrl)(m):void 0;m&&(p.canonicalUrl=o);let[s,h]=t.slice(-2);null!==s&&(r.status=f.CacheStates.READY,r.subTreeData=s,(0,d.fillLazyItemsTillLeafWithHead)(r,void 0,n,h),p.cache=r,p.prefetchCache=new Map),p.previousTree=y,p.patchedTree=u,p.canonicalUrl=_,y=u}return(0,s.handleMutable)(e,p)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},34520:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"restoreReducer",{enumerable:!0,get:function(){return u}});let n=r(8744);function u(e,t){let{url:r,tree:u}=t,o=(0,n.createHrefFromUrl)(r);return{buildId:e.buildId,canonicalUrl:o,pushRef:e.pushRef,focusAndScrollRef:e.focusAndScrollRef,cache:e.cache,prefetchCache:e.prefetchCache,tree:u,nextUrl:r.pathname}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},87366:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"serverActionReducer",{enumerable:!0,get:function(){return p}});let n=r(303),u=r(34852),o=r(7718),l=r(90168),a=r(35456),i=r(74741),c=r(12409),s=r(8744),f=r(14024);async function d(e,t){let r,{actionId:o,actionArgs:l}=t,i=await (0,a.encodeReply)(l),s=await 
fetch("",{method:"POST",headers:{Accept:u.RSC_CONTENT_TYPE_HEADER,"Next-Action":o,[u.NEXT_ROUTER_STATE_TREE]:JSON.stringify(e.tree),...e.nextUrl?{[u.NEXT_URL]:e.nextUrl}:{}},body:i}),f=s.headers.get("x-action-redirect");try{let e=JSON.parse(s.headers.get("x-action-revalidated")||"[[],0,0]");r={paths:e[0]||[],tag:!!e[1],cookie:e[2]}}catch(e){r={paths:[],tag:!1,cookie:!1}}let d=f?new URL((0,c.addBasePath)(f),window.location.origin):void 0;if(s.headers.get("content-type")===u.RSC_CONTENT_TYPE_HEADER){let e=await (0,a.createFromFetch)(Promise.resolve(s),{callServer:n.callServer});if(f){let[,t]=e;return{actionFlightData:null==t?void 0:t[1],redirectLocation:d,revalidatedParts:r}}{let[t,[,n]]=null!=e?e:[];return{actionResult:t,actionFlightData:n,redirectLocation:d,revalidatedParts:r}}}return{redirectLocation:d,revalidatedParts:r}}function p(e,t){if(t.mutable.serverActionApplied)return e;t.mutable.inFlightServerAction||(t.mutable.previousTree=e.tree,t.mutable.previousUrl=e.canonicalUrl,t.mutable.inFlightServerAction=(0,o.createRecordFromThenable)(d(e,t)));try{var r,n;let{actionResult:u,actionFlightData:a,redirectLocation:c,revalidatedParts:d}=(0,l.readRecordValue)(t.mutable.inFlightServerAction);if(d.tag||d.cookie?e.prefetchCache.clear():d.paths.length>0&&e.prefetchCache.clear(),c){if(a){let n=(0,s.createHrefFromUrl)(c,!1),u=e.prefetchCache.get(n);e.prefetchCache.set(n,{data:(0,o.createRecordFromThenable)(Promise.resolve([a,void 0])),kind:null!=(r=null==u?void 0:u.kind)?r:i.PrefetchKind.TEMPORARY,prefetchTime:Date.now(),treeAtTimeOfPrefetch:t.mutable.previousTree,lastUsedTime:null})}t.reject((0,f.getRedirectError)(c.toString(),f.RedirectType.push))}else{if(a){let r=(0,s.createHrefFromUrl)(new URL(t.mutable.previousUrl,window.location.origin),!1),u=e.prefetchCache.get(r);e.prefetchCache.set((0,s.createHrefFromUrl)(new URL(t.mutable.previousUrl,window.location.origin),!1),{data:(0,o.createRecordFromThenable)(Promise.resolve([a,void 0])),kind:null!=(n=null==u?void 0:u.kind)?n:i.PrefetchKind.TEMPORARY,prefetchTime:Date.now(),treeAtTimeOfPrefetch:t.mutable.previousTree,lastUsedTime:null}),setTimeout(()=>{t.changeByServerResponse(t.mutable.previousTree,a,void 0)})}t.resolve(u)}}catch(e){if("rejected"===e.status)t.reject(e.value);else throw e}return t.mutable.serverActionApplied=!0,e}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},77519:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"serverPatchReducer",{enumerable:!0,get:function(){return c}});let n=r(8744),u=r(76031),o=r(58999),l=r(86664),a=r(68654),i=r(14129);function c(e,t){let{flightData:r,previousTree:c,overrideCanonicalUrl:s,cache:f,mutable:d}=t,p=JSON.stringify(c)===JSON.stringify(e.tree);if(!p)return console.log("TREE MISMATCH"),e;if(d.previousTree)return(0,i.handleMutable)(e,d);if("string"==typeof r)return(0,l.handleExternalUrl)(e,d,r,e.pushRef.pendingPush);let h=e.tree,_=e.cache;for(let t of r){let r=t.slice(0,-4),[i]=t.slice(-3,-2),c=(0,u.applyRouterStatePatchToTree)(["",...r],h,i);if(null===c)throw Error("SEGMENT MISMATCH");if((0,o.isNavigatingToNewRootLayout)(h,c))return(0,l.handleExternalUrl)(e,d,e.canonicalUrl,e.pushRef.pendingPush);let p=s?(0,n.createHrefFromUrl)(s):void 
0;p&&(d.canonicalUrl=p),(0,a.applyFlightData)(_,f,t),d.previousTree=h,d.patchedTree=c,d.cache=f,_=f,h=c}return(0,i.handleMutable)(e,d)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},74741:function(e,t){"use strict";var r,n;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{PrefetchKind:function(){return r},ACTION_REFRESH:function(){return u},ACTION_NAVIGATE:function(){return o},ACTION_RESTORE:function(){return l},ACTION_SERVER_PATCH:function(){return a},ACTION_PREFETCH:function(){return i},ACTION_FAST_REFRESH:function(){return c},ACTION_SERVER_ACTION:function(){return s}});let u="refresh",o="navigate",l="restore",a="server-patch",i="prefetch",c="fast-refresh",s="server-action";(n=r||(r={})).AUTO="auto",n.FULL="full",n.TEMPORARY="temporary",("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},85426:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"reducer",{enumerable:!0,get:function(){return f}});let n=r(74741),u=r(86664),o=r(77519),l=r(34520),a=r(49901),i=r(72763),c=r(73800),s=r(87366),f=function(e,t){switch(t.type){case n.ACTION_NAVIGATE:return(0,u.navigateReducer)(e,t);case n.ACTION_SERVER_PATCH:return(0,o.serverPatchReducer)(e,t);case n.ACTION_RESTORE:return(0,l.restoreReducer)(e,t);case n.ACTION_REFRESH:return(0,a.refreshReducer)(e,t);case n.ACTION_FAST_REFRESH:return(0,c.fastRefreshReducer)(e,t);case n.ACTION_PREFETCH:return(0,i.prefetchReducer)(e,t);case n.ACTION_SERVER_ACTION:return(0,s.serverActionReducer)(e,t);default:throw Error("Unknown action")}};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},34712:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"shouldHardNavigate",{enumerable:!0,get:function(){return function e(t,r){let[u,o]=r,[l,a]=t;if(!(0,n.matchSegment)(l,u))return!!Array.isArray(l);let i=t.length<=2;return!i&&e(t.slice(2),o[a])}}});let n=r(50655);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},98323:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createSearchParamsBailoutProxy",{enumerable:!0,get:function(){return u}});let n=r(62620);function u(){return new Proxy({},{get(e,t){"string"==typeof t&&(0,n.staticGenerationBailout)("searchParams."+t)}})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},62620:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"staticGenerationBailout",{enumerable:!0,get:function(){return l}});let n=r(47308),u=r(30094);class o extends Error{constructor(...e){super(...e),this.code="NEXT_STATIC_GEN_BAILOUT"}}let l=(e,t)=>{let 
r=u.staticGenerationAsyncStorage.getStore();if(null==r?void 0:r.forceStatic)return!0;if(null==r?void 0:r.dynamicShouldError){let{dynamic:r="error",link:n}=t||{};throw new o('Page with `dynamic = "'+r+"\"` couldn't be rendered statically because it used `"+e+"`."+(n?" See more info here: "+n:""))}if(r&&(r.revalidate=0),null==r?void 0:r.isStaticGeneration){let t=new n.DynamicServerError(e);throw r.dynamicUsageDescription=e,r.dynamicUsageStack=t.stack,t}return!1};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},58531:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return l}});let n=r(26927),u=n._(r(86006)),o=r(98323);function l(e){let{Component:t,propsForComponent:r}=e,n=(0,o.createSearchParamsBailoutProxy)();return u.default.createElement(t,{searchParams:n,...r})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},18688:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"useReducerWithReduxDevtools",{enumerable:!0,get:function(){return o}});let n=r(86006);function u(e){if(e instanceof Map){let t={};for(let[r,n]of e.entries()){if("function"==typeof n){t[r]="fn()";continue}if("object"==typeof n&&null!==n){if(n.$$typeof){t[r]=n.$$typeof.toString();continue}if(n._bundlerConfig){t[r]="FlightData";continue}}t[r]=u(n)}return t}if("object"==typeof e&&null!==e){let t={};for(let r in e){let n=e[r];if("function"==typeof n){t[r]="fn()";continue}if("object"==typeof n&&null!==n){if(n.$$typeof){t[r]=n.$$typeof.toString();continue}if(n.hasOwnProperty("_bundlerConfig")){t[r]="FlightData";continue}}t[r]=u(n)}return t}return Array.isArray(e)?e.map(u):e}let o=function(e,t){let r=(0,n.useRef)(),o=(0,n.useRef)();(0,n.useEffect)(()=>{if(!r.current&&!1!==o.current){if(void 0===o.current&&void 0===window.__REDUX_DEVTOOLS_EXTENSION__){o.current=!1;return}return r.current=window.__REDUX_DEVTOOLS_EXTENSION__.connect({instanceId:8e3,name:"next-router"}),r.current&&r.current.init(u(t)),()=>{r.current=void 0}}},[t]);let[l,a]=(0,n.useReducer)((t,n)=>{let o=e(t,n);return r.current&&r.current.send(n,u(o)),o},t),i=(0,n.useCallback)(()=>{r.current&&r.current.send({type:"RENDER_SYNC"},u(l))},[l]);return[l,a,i]};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},75588:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"normalizePathTrailingSlash",{enumerable:!0,get:function(){return o}});let n=r(61402),u=r(74035),o=e=>{if(!e.startsWith("/"))return e;let{pathname:t,query:r,hash:o}=(0,u.parsePath)(e);return""+(0,n.removeTrailingSlash)(t)+r+o};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},59214:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return u}});let n=r(98687);function u(e){let 
t="function"==typeof reportError?reportError:e=>{window.console.error(e)};e.digest!==n.NEXT_DYNAMIC_NO_SSR_CODE&&t(e)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},15456:function(e,t,r){"use strict";var n,u;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{CacheStates:function(){return n},AppRouterContext:function(){return a},LayoutRouterContext:function(){return i},GlobalLayoutRouterContext:function(){return c},TemplateContext:function(){return s}});let o=r(26927),l=o._(r(86006));(u=n||(n={})).LAZY_INITIALIZED="LAZYINITIALIZED",u.DATA_FETCH="DATAFETCH",u.READY="READY";let a=l.default.createContext(null),i=l.default.createContext(null),c=l.default.createContext(null),s=l.default.createContext(null)},77279:function(e,t){"use strict";function r(e){let t=5381;for(let r=0;r!t||"("===t[0]&&t.endsWith(")")||"@"===t[0]||("page"===t||"route"===t)&&r===n.length-1?e:e+"/"+t,""))}function o(e,t){return t?e.replace(/\.rsc($|\?)/,"$1"):e}},92998:function(e,t){"use strict";function r(e,t){void 0===t&&(t={});let r=document.documentElement,n=r.style.scrollBehavior;r.style.scrollBehavior="auto",t.dontForceLayout||r.getClientRects(),e(),r.style.scrollBehavior=n}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"handleSmoothScroll",{enumerable:!0,get:function(){return r}})},30753:function(e,t){"use strict";function r(e){return/Googlebot|Mediapartners-Google|AdsBot-Google|googleweblight|Storebot-Google|Google-PageRenderer|Bingbot|BingPreview|Slurp|DuckDuckBot|baiduspider|yandex|sogou|LinkedInBot|bitlybot|tumblr|vkShare|quora link preview|facebookexternalhit|facebookcatalog|Twitterbot|applebot|redditbot|Slackbot|Discordbot|WhatsApp|SkypeUriPreview|ia_archiver/i.test(e)}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isBot",{enumerable:!0,get:function(){return r}})},74035:function(e,t){"use strict";function r(e){let t=e.indexOf("#"),r=e.indexOf("?"),n=r>-1&&(t<0||r-1?{pathname:e.substring(0,n?r:t),query:n?e.substring(r,t>-1?t:void 0):"",hash:t>-1?e.slice(t):""}:{pathname:e,query:"",hash:""}}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"parsePath",{enumerable:!0,get:function(){return r}})},61402:function(e,t){"use strict";function r(e){return e.replace(/\/$/,"")||"/"}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"removeTrailingSlash",{enumerable:!0,get:function(){return r}})},73476:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{ServerInsertedHTMLContext:function(){return o},useServerInsertedHTML:function(){return l}});let n=r(25909),u=n._(r(86006)),o=u.default.createContext(null);function l(e){let t=(0,u.useContext)(o);t&&t(e)}},75862:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createAsyncLocalStorage",{enumerable:!0,get:function(){return o}});let r=Error("Invariant: AsyncLocalStorage accessed in runtime where it is not available");class n{disable(){throw r}getStore(){}run(){throw r}exit(){throw r}enterWith(){throw r}}let u=globalThis.AsyncLocalStorage;function o(){return u?new u:new n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},24437:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"requestAsyncStorage",{enumerable:!0,get:function(){return u}});let n=r(75862),u=(0,n.createAsyncLocalStorage)();("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},30094:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"staticGenerationAsyncStorage",{enumerable:!0,get:function(){return u}});let n=r(75862),u=(0,n.createAsyncLocalStorage)();("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},93194:function(e,t,r){"use strict";var n=r(8431);t.createRoot=n.createRoot,t.hydrateRoot=n.hydrateRoot},8431:function(e,t,r){"use strict";!function e(){if("undefined"!=typeof __REACT_DEVTOOLS_GLOBAL_HOOK__&&"function"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE)try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(e){console.error(e)}}(),e.exports=r(42614)},82672:function(e,t,r){"use strict";/** - * @license React - * react-server-dom-webpack-client.browser.production.min.js - * - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var n=r(8431),u=r(86006),o={stream:!0},l=new Map;function a(e){var t=globalThis.__next_require__(e);return"function"!=typeof t.then||"fulfilled"===t.status?null:(t.then(function(e){t.status="fulfilled",t.value=e},function(e){t.status="rejected",t.reason=e}),t)}function i(){}var c=n.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.Dispatcher,s=Symbol.for("react.element"),f=Symbol.for("react.lazy"),d=Symbol.for("react.default_value"),p=Symbol.iterator,h=Array.isArray,_=new WeakMap,y=u.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.ContextRegistry;function b(e,t,r,n){this.status=e,this.value=t,this.reason=r,this._response=n}function v(e){switch(e.status){case"resolved_model":j(e);break;case"resolved_module":S(e)}switch(e.status){case"fulfilled":return e.value;case"pending":case"blocked":throw e;default:throw e.reason}}function m(e,t){for(var r=0;rd?(h=d,d=3,f++):(h=0,d=3);continue;case 2:44===(v=s[f++])?d=4:_=_<<4|(96s.length&&(v=-1)}var m=s.byteOffset+f;if(-1>>1,u=e[n];if(0>>1;no(i,r))co(s,i)?(e[n]=s,e[c]=r,n=c):(e[n]=i,e[a]=r,n=a);else if(co(s,r))e[n]=s,e[c]=r,n=c;else break}}return t}function o(e,t){var r=e.sortIndex-t.sortIndex;return 0!==r?r:e.id-t.id}if(t.unstable_now=void 0,"object"==typeof performance&&"function"==typeof performance.now){var l,a=performance;t.unstable_now=function(){return a.now()}}else{var i=Date,c=i.now();t.unstable_now=function(){return i.now()-c}}var s=[],f=[],d=1,p=null,h=3,_=!1,y=!1,b=!1,v="function"==typeof setTimeout?setTimeout:null,m="function"==typeof clearTimeout?clearTimeout:null,g="undefined"!=typeof setImmediate?setImmediate:null;function O(e){for(var t=n(f);null!==t;){if(null===t.callback)u(f);else if(t.startTime<=e)u(f),t.sortIndex=t.expirationTime,r(s,t);else break;t=n(f)}}function P(e){if(b=!1,O(e),!y){if(null!==n(s))y=!0,N(E);else{var t=n(f);null!==t&&I(P,t.startTime-e)}}}function 
E(e,r){y=!1,b&&(b=!1,m(S),S=-1),_=!0;var o=h;try{e:{for(O(r),p=n(s);null!==p&&(!(p.expirationTime>r)||e&&!w());){var l=p.callback;if("function"==typeof l){p.callback=null,h=p.priorityLevel;var a=l(p.expirationTime<=r);if(r=t.unstable_now(),"function"==typeof a){p.callback=a,O(r);var i=!0;break e}p===n(s)&&u(s),O(r)}else u(s);p=n(s)}if(null!==p)i=!0;else{var c=n(f);null!==c&&I(P,c.startTime-r),i=!1}}return i}finally{p=null,h=o,_=!1}}"undefined"!=typeof navigator&&void 0!==navigator.scheduling&&void 0!==navigator.scheduling.isInputPending&&navigator.scheduling.isInputPending.bind(navigator.scheduling);var R=!1,j=null,S=-1,T=5,M=-1;function w(){return!(t.unstable_now()-Me||125l?(e.sortIndex=o,r(f,e),null===n(s)&&e===n(f)&&(b?(m(S),S=-1):b=!0,I(P,o-l))):(e.sortIndex=a,r(s,e),y||_||(y=!0,N(E))),e},t.unstable_shouldYield=w,t.unstable_wrapCallback=function(e){var t=h;return function(){var r=h;h=t;try{return e.apply(this,arguments)}finally{h=r}}}},26183:function(e,t,r){"use strict";e.exports=r(24248)},24778:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"getSegmentParam",{enumerable:!0,get:function(){return u}});let n=r(47399);function u(e){let t=n.INTERCEPTION_ROUTE_MARKERS.find(t=>e.startsWith(t));return(t&&(e=e.slice(t.length)),e.startsWith("[[...")&&e.endsWith("]]"))?{type:"optional-catchall",param:e.slice(5,-2)}:e.startsWith("[...")&&e.endsWith("]")?{type:"catchall",param:e.slice(4,-1)}:e.startsWith("[")&&e.endsWith("]")?{type:"dynamic",param:e.slice(1,-1)}:null}},47399:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{INTERCEPTION_ROUTE_MARKERS:function(){return u},isInterceptionRouteAppPath:function(){return o},extractInterceptionRouteInformation:function(){return l}});let n=r(24241),u=["(..)(..)","(.)","(..)","(...)"];function o(e){return void 0!==e.split("/").find(e=>u.find(t=>e.startsWith(t)))}function l(e){let t,r,o;for(let n of e.split("/"))if(r=u.find(e=>n.startsWith(e))){[t,o]=e.split(r,2);break}if(!t||!r||!o)throw Error(`Invalid interception route: ${e}. Must be in the format //(..|...|..)(..)/`);switch(t=(0,n.normalizeAppPath)(t),r){case"(.)":o="/"===t?`/${o}`:t+"/"+o;break;case"(..)":if("/"===t)throw Error(`Invalid interception route: ${e}. Cannot use (..) marker at the root level, use (.) instead.`);o=t.split("/").slice(0,-1).concat(o).join("/");break;case"(...)":o="/"+o;break;case"(..)(..)":let l=t.split("/");if(l.length<=2)throw Error(`Invalid interception route: ${e}. Cannot use (..)(..) 
marker at the root level or one level up.`);o=l.slice(0,-2).concat(o).join("/");break;default:throw Error("Invariant: unexpected marker")}return{interceptingRoute:t,interceptedRoute:o}}},26927:function(e,t,r){"use strict";function n(e){return e&&e.__esModule?e:{default:e}}r.r(t),r.d(t,{_:function(){return n},_interop_require_default:function(){return n}})},25909:function(e,t,r){"use strict";function n(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(n=function(e){return e?r:t})(e)}function u(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=n(t);if(r&&r.has(e))return r.get(e);var u={},o=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var l in e)if("default"!==l&&Object.prototype.hasOwnProperty.call(e,l)){var a=o?Object.getOwnPropertyDescriptor(e,l):null;a&&(a.get||a.set)?Object.defineProperty(u,l,a):u[l]=e[l]}return u.default=e,r&&r.set(e,u),u}r.r(t),r.d(t,{_:function(){return u},_interop_require_wildcard:function(){return u}})}}]); \ No newline at end of file diff --git a/spaces/langvision/codellama-34b-chat/README.md b/spaces/langvision/codellama-34b-chat/README.md deleted file mode 100644 index 01dd7bb33b9c73b989256aafbcc69ed67c83f6c7..0000000000000000000000000000000000000000 --- a/spaces/langvision/codellama-34b-chat/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: CodeLlama-34B -emoji: 🧑‍💻🦙 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/legoandmars/glide-inpainting/glide_text2im/tokenizer/bpe.py b/spaces/legoandmars/glide-inpainting/glide_text2im/tokenizer/bpe.py deleted file mode 100644 index 5dcd56586a9c7bd974c1dd264152ecb70f909619..0000000000000000000000000000000000000000 --- a/spaces/legoandmars/glide-inpainting/glide_text2im/tokenizer/bpe.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -Byte pair encoding utilities adapted from: -https://github.com/openai/gpt-2/blob/master/src/encoder.py -""" - -import gzip -import json -import os -from functools import lru_cache -from typing import List, Tuple - -import regex as re - - -@lru_cache() -def bytes_to_unicode(): - """ - Returns list of utf-8 byte and a corresponding list of unicode strings. - The reversible bpe codes work on unicode strings. - This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. - When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. - This is a signficant percentage of your normal, say, 32K bpe vocab. - To avoid that, we want lookup tables between utf-8 bytes and unicode strings. - And avoids mapping to whitespace/control characters the bpe code barfs on. - """ - bs = ( - list(range(ord("!"), ord("~") + 1)) - + list(range(ord("¡"), ord("¬") + 1)) - + list(range(ord("®"), ord("ÿ") + 1)) - ) - cs = bs[:] - n = 0 - for b in range(2 ** 8): - if b not in bs: - bs.append(b) - cs.append(2 ** 8 + n) - n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) - - -def get_pairs(word): - """Return set of symbol pairs in a word. - Word is represented as tuple of symbols (symbols being variable-length strings). 
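- Example:
- get_pairs(('h','e','l','l','o')) == {('h','e'), ('e','l'), ('l','l'), ('l','o')}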
- """ - pairs = set() - prev_char = word[0] - for char in word[1:]: - pairs.add((prev_char, char)) - prev_char = char - return pairs - - -class Encoder: - def __init__(self, encoder, bpe_merges, errors="replace"): - self.encoder = encoder - self.decoder = {v: k for k, v in self.encoder.items()} - self.errors = errors # how to handle errors in decoding - self.byte_encoder = bytes_to_unicode() - self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) - self.cache = {} - - # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions - self.pat = re.compile( - r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" - ) - - @property - def n_vocab(self) -> int: - return len(self.encoder) - - @property - def end_token(self) -> int: - return self.n_vocab - 1 - - def padded_tokens_and_mask( - self, tokens: List[int], text_ctx: int - ) -> Tuple[List[int], List[bool]]: - tokens = tokens[:text_ctx] - padding = text_ctx - len(tokens) - padded_tokens = tokens + [self.end_token] * padding - mask = [True] * len(tokens) + [False] * padding - return padded_tokens, mask - - def bpe(self, token): - if token in self.cache: - return self.cache[token] - word = tuple(token) - pairs = get_pairs(word) - - if not pairs: - return token - - while True: - bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) - if bigram not in self.bpe_ranks: - break - first, second = bigram - new_word = [] - i = 0 - while i < len(word): - try: - j = word.index(first, i) - new_word.extend(word[i:j]) - i = j - except: # pylint: disable=bare-except - new_word.extend(word[i:]) - break - - if word[i] == first and i < len(word) - 1 and word[i + 1] == second: - new_word.append(first + second) - i += 2 - else: - new_word.append(word[i]) - i += 1 - new_word = tuple(new_word) - word = new_word - if len(word) == 1: - break - else: - pairs = get_pairs(word) - word = " ".join(word) - self.cache[token] = word - return word - - def encode(self, text): - text = text.lower() - bpe_tokens = [] - for token in re.findall(self.pat, text): - token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) - bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")) - return bpe_tokens - - def decode(self, tokens): - text = "".join([self.decoder[token] for token in tokens]) - text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) - return text - - -def get_encoder(): - root_dir = os.path.dirname(os.path.abspath(__file__)) - with gzip.open(os.path.join(root_dir, "encoder.json.gz"), "r") as f: - encoder = json.load(f) - with gzip.open(os.path.join(root_dir, "vocab.bpe.gz"), "r") as f: - bpe_data = str(f.read(), "utf-8") - bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]] - return Encoder( - encoder=encoder, - bpe_merges=bpe_merges, - ) diff --git a/spaces/leogabraneth/text-generation-webui-main/extensions/openai/edits.py b/spaces/leogabraneth/text-generation-webui-main/extensions/openai/edits.py deleted file mode 100644 index edf4e6c05611f0f0d2e526f82c4ebc5f477e9c9f..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/extensions/openai/edits.py +++ /dev/null @@ -1,101 +0,0 @@ -import time - -import yaml -from extensions.openai.defaults import get_default_req_params -from extensions.openai.errors import InvalidRequestError -from extensions.openai.utils 
import debug_msg -from modules import shared -from modules.text_generation import encode, generate_reply - - -def edits(instruction: str, input: str, temperature=1.0, top_p=1.0) -> dict: - - created_time = int(time.time() * 1000) - - # Request parameters - req_params = get_default_req_params() - stopping_strings = [] - - # Alpaca is verbose so a good default prompt - default_template = ( - "Below is an instruction that describes a task, paired with an input that provides further context. " - "Write a response that appropriately completes the request.\n\n" - "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n" - ) - - instruction_template = default_template - - # Use the special instruction/input/response template for anything trained like Alpaca - if shared.settings['instruction_template']: - if 'Alpaca' in shared.settings['instruction_template']: - stopping_strings.extend(['\n###']) - else: - try: - instruct = yaml.safe_load(open(f"instruction-templates/{shared.settings['instruction_template']}.yaml", 'r')) - - template = instruct['turn_template'] - template = template\ - .replace('<|user|>', instruct.get('user', ''))\ - .replace('<|bot|>', instruct.get('bot', ''))\ - .replace('<|user-message|>', '{instruction}\n{input}') - - instruction_template = instruct.get('context', '') + template[:template.find('<|bot-message|>')].rstrip(' ') - if instruct['user']: - stopping_strings.extend(['\n' + instruct['user'], instruct['user']]) - - except Exception as e: - instruction_template = default_template - print(f"Exception: When loading instruction-templates/{shared.settings['instruction_template']}.yaml: {repr(e)}") - print("Warning: Loaded default instruction-following template (Alpaca) for model.") - else: - stopping_strings.extend(['\n###']) - print("Warning: Loaded default instruction-following template (Alpaca) for model.") - - edit_task = instruction_template.format(instruction=instruction, input=input) - - truncation_length = shared.settings['truncation_length'] - - token_count = len(encode(edit_task)[0]) - max_tokens = truncation_length - token_count - - if max_tokens < 1: - err_msg = f"This model maximum context length is {truncation_length} tokens. However, your messages resulted in over {truncation_length - max_tokens} tokens." - raise InvalidRequestError(err_msg, param='input') - - req_params['max_new_tokens'] = max_tokens - req_params['truncation_length'] = truncation_length - req_params['temperature'] = temperature - req_params['top_p'] = top_p - req_params['seed'] = shared.settings.get('seed', req_params['seed']) - req_params['add_bos_token'] = shared.settings.get('add_bos_token', req_params['add_bos_token']) - req_params['custom_stopping_strings'] = shared.settings['custom_stopping_strings'] - - debug_msg({'edit_template': edit_task, 'req_params': req_params, 'token_count': token_count}) - - generator = generate_reply(edit_task, req_params, stopping_strings=stopping_strings, is_chat=False) - - answer = '' - for a in generator: - answer = a - - # some reply's have an extra leading space to fit the instruction template, just clip it off from the reply. 
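- # (e.g. an instruction template ending in "### Response:" without a trailing newline typically makes the model emit " answer" rather than "answer")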
- if edit_task[-1] != '\n' and answer and answer[0] == ' ': - answer = answer[1:] - - completion_token_count = len(encode(answer)[0]) - - resp = { - "object": "edit", - "created": created_time, - "choices": [{ - "text": answer, - "index": 0, - }], - "usage": { - "prompt_tokens": token_count, - "completion_tokens": completion_token_count, - "total_tokens": token_count + completion_token_count - } - } - - return resp diff --git a/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/webui/templates/index.html b/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/webui/templates/index.html deleted file mode 100644 index 25c3e4a5f30abe82f8ada5a800c7ff2fa0240855..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/webui/templates/index.html +++ /dev/null @@ -1,83 +0,0 @@ - - - - EXLlama - - - - - - - - - - - - - - - - - - - -
- [HTML markup lost in extraction; the template's recoverable control labels: Model, Fixed prompt, Participants, Sampler, Stop condition, Repetition penalty]
            - - - \ No newline at end of file diff --git a/spaces/leonelhs/faceshine/utils.py b/spaces/leonelhs/faceshine/utils.py deleted file mode 100644 index 9062dd8fa65718e691987b2a5c852423834097ef..0000000000000000000000000000000000000000 --- a/spaces/leonelhs/faceshine/utils.py +++ /dev/null @@ -1,27 +0,0 @@ -from typing import Tuple - -import PIL -from PIL.Image import Image as PILImage - - -def get_background_dominant_color(img: PILImage, mask: PILImage) -> tuple: - negative_img = img.copy() - negative_mask = PIL.ImageOps.invert(mask) - negative_img.putalpha(negative_mask) - negative_img = negative_img.resize((1, 1)) - r, g, b, a = negative_img.getpixel((0, 0)) - return r, g, b, 255 - - -def apply_background_color(img: PILImage, color: Tuple[int, int, int, int]) -> PILImage: - r, g, b, a = color - colored_image = PIL.Image.new("RGBA", img.size, (r, g, b, a)) - colored_image.paste(img, mask=img) - return colored_image - - -def make_flatten_background(img, mask): - img = PIL.Image.open(img) - mask = PIL.Image.open(mask) - color = get_background_dominant_color(img, mask) - return apply_background_color(img, color) diff --git a/spaces/lindeberg/whisper-webui/src/segments.py b/spaces/lindeberg/whisper-webui/src/segments.py deleted file mode 100644 index ec2650dceade5d0b2022264f6419115eab085aea..0000000000000000000000000000000000000000 --- a/spaces/lindeberg/whisper-webui/src/segments.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Any, Dict, List - -import copy - -def merge_timestamps(timestamps: List[Dict[str, Any]], merge_window: float = 5, max_merge_size: float = 30, padding_left: float = 1, padding_right: float = 1): - result = [] - - if len(timestamps) == 0: - return result - if max_merge_size is None: - return timestamps - - if padding_left is None: - padding_left = 0 - if padding_right is None: - padding_right = 0 - - processed_time = 0 - current_segment = None - - for i in range(len(timestamps)): - next_segment = timestamps[i] - - delta = next_segment['start'] - processed_time - - # Note that segments can still be longer than the max merge size, they just won't be merged in that case - if current_segment is None or (merge_window is not None and delta > merge_window) \ - or next_segment['end'] - current_segment['start'] > max_merge_size: - # Finish the current segment - if current_segment is not None: - # Add right padding - finish_padding = min(padding_right, delta / 2) if delta < padding_left + padding_right else padding_right - current_segment['end'] += finish_padding - delta -= finish_padding - - result.append(current_segment) - - # Start a new segment - current_segment = copy.deepcopy(next_segment) - - # Pad the segment - current_segment['start'] = current_segment['start'] - min(padding_left, delta) - processed_time = current_segment['end'] - - else: - # Merge the segment - current_segment['end'] = next_segment['end'] - processed_time = current_segment['end'] - - # Add the last segment - if current_segment is not None: - current_segment['end'] += padding_right - result.append(current_segment) - - return result \ No newline at end of file diff --git a/spaces/lixq/bingo61/src/pages/api/sydney.ts b/spaces/lixq/bingo61/src/pages/api/sydney.ts deleted file mode 100644 index 0e7bbf23d77c2e1a6635185a060eeee58b8c8e66..0000000000000000000000000000000000000000 --- a/spaces/lixq/bingo61/src/pages/api/sydney.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { NextApiRequest, NextApiResponse } from 'next' -import { WebSocket, debug } from '@/lib/isomorphic' -import { BingWebBot } from 
'@/lib/bots/bing' -import { websocketUtils } from '@/lib/bots/bing/utils' -import { WatchDog, createHeaders } from '@/lib/utils' - - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const conversationContext = req.body - const headers = createHeaders(req.cookies) - debug(headers) - res.setHeader('Content-Type', 'text/stream; charset=UTF-8') - - const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', { - headers: { - ...headers, - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - pragma: 'no-cache', - } - }) - - const closeDog = new WatchDog() - const timeoutDog = new WatchDog() - ws.onmessage = (event) => { - timeoutDog.watch(() => { - ws.send(websocketUtils.packMessage({ type: 6 })) - }, 1500) - closeDog.watch(() => { - ws.close() - }, 10000) - res.write(event.data) - if (/\{"type":([367])\}/.test(String(event.data))) { - const type = parseInt(RegExp.$1, 10) - debug('connection type', type) - if (type === 3) { - ws.close() - } else { - ws.send(websocketUtils.packMessage({ type })) - } - } - } - - ws.onclose = () => { - timeoutDog.reset() - closeDog.reset() - debug('connection close') - res.end() - } - - await new Promise((resolve) => ws.onopen = resolve) - ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 })) - ws.send(websocketUtils.packMessage({ type: 6 })) - ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!))) - req.socket.once('close', () => { - ws.close() - if (!res.closed) { - res.end() - } - }) -} diff --git a/spaces/lolikme/gsdf-Counterfeit-V2.0/app.py b/spaces/lolikme/gsdf-Counterfeit-V2.0/app.py deleted file mode 100644 index 9f9e01c4a704f4bf5f8569b778c62b55160fa270..0000000000000000000000000000000000000000 --- a/spaces/lolikme/gsdf-Counterfeit-V2.0/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/gsdf/Counterfeit-V2.0").launch() \ No newline at end of file diff --git a/spaces/luost26/DiffAb/abnumber/__version__.py b/spaces/luost26/DiffAb/abnumber/__version__.py deleted file mode 100644 index 0404d81037f66e8dfa8fa4e18458619876f083bc..0000000000000000000000000000000000000000 --- a/spaces/luost26/DiffAb/abnumber/__version__.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '0.3.0' diff --git a/spaces/luost26/DiffAb/diffab/modules/common/so3.py b/spaces/luost26/DiffAb/diffab/modules/common/so3.py deleted file mode 100644 index 794b65c7bff9289e0cc5b6cf2c1a37f9db2cb6f5..0000000000000000000000000000000000000000 --- a/spaces/luost26/DiffAb/diffab/modules/common/so3.py +++ /dev/null @@ -1,146 +0,0 @@ -import math -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .geometry import quaternion_to_rotation_matrix - - -def log_rotation(R): - trace = R[..., range(3), range(3)].sum(-1) - if torch.is_grad_enabled(): - # The derivative of acos at -1.0 is -inf, so to stablize the gradient, we use -0.9999 - min_cos = -0.999 - else: - min_cos = -1.0 - cos_theta = ( (trace-1) / 2 ).clamp_min(min=min_cos) - sin_theta = torch.sqrt(1 - cos_theta**2) - theta = torch.acos(cos_theta) - coef = ((theta+1e-8)/(2*sin_theta+2e-8))[..., None, None] - logR = coef * (R - R.transpose(-1, -2)) - return logR - - -def skewsym_to_so3vec(S): - x = S[..., 1, 2] - y = S[..., 2, 0] - z = S[..., 0, 1] - w = torch.stack([x,y,z], dim=-1) - return w - - -def so3vec_to_skewsym(w): - x, y, z = torch.unbind(w, dim=-1) - o = 
torch.zeros_like(x) - S = torch.stack([ - o, z, -y, - -z, o, x, - y, -x, o, - ], dim=-1).reshape(w.shape[:-1] + (3, 3)) - return S - - -def exp_skewsym(S): - x = torch.linalg.norm(skewsym_to_so3vec(S), dim=-1) - I = torch.eye(3).to(S).view([1 for _ in range(S.dim()-2)] + [3, 3]) - - sinx, cosx = torch.sin(x), torch.cos(x) - b = (sinx + 1e-8) / (x + 1e-8) - c = (1-cosx + 1e-8) / (x**2 + 2e-8) # lim_{x->0} (1-cosx)/(x^2) = 0.5 - - S2 = S @ S - return I + b[..., None, None]*S + c[..., None, None]*S2 - - -def so3vec_to_rotation(w): - return exp_skewsym(so3vec_to_skewsym(w)) - - -def rotation_to_so3vec(R): - logR = log_rotation(R) - w = skewsym_to_so3vec(logR) - return w - - -def random_uniform_so3(size, device='cpu'): - q = F.normalize(torch.randn(list(size)+[4,], device=device), dim=-1) # (..., 4) - return rotation_to_so3vec(quaternion_to_rotation_matrix(q)) - - -class ApproxAngularDistribution(nn.Module): - - def __init__(self, stddevs, std_threshold=0.1, num_bins=8192, num_iters=1024): - super().__init__() - self.std_threshold = std_threshold - self.num_bins = num_bins - self.num_iters = num_iters - self.register_buffer('stddevs', torch.FloatTensor(stddevs)) - self.register_buffer('approx_flag', self.stddevs <= std_threshold) - self._precompute_histograms() - - @staticmethod - def _pdf(x, e, L): - """ - Args: - x: (N, ) - e: Float - L: Integer - """ - x = x[:, None] # (N, *) - c = ((1 - torch.cos(x)) / math.pi) # (N, *) - l = torch.arange(0, L)[None, :] # (*, L) - a = (2*l+1) * torch.exp(-l*(l+1)*(e**2)) # (*, L) - b = (torch.sin( (l+0.5)* x ) + 1e-6) / (torch.sin( x / 2 ) + 1e-6) # (N, L) - - f = (c * a * b).sum(dim=1) - return f - - def _precompute_histograms(self): - X, Y = [], [] - for std in self.stddevs: - std = std.item() - x = torch.linspace(0, math.pi, self.num_bins) # (n_bins,) - y = self._pdf(x, std, self.num_iters) # (n_bins,) - y = torch.nan_to_num(y).clamp_min(0) - X.append(x) - Y.append(y) - self.register_buffer('X', torch.stack(X, dim=0)) # (n_stddevs, n_bins) - self.register_buffer('Y', torch.stack(Y, dim=0)) # (n_stddevs, n_bins) - - def sample(self, std_idx): - """ - Args: - std_idx: Indices of standard deviation. - Returns: - samples: Angular samples [0, PI), same size as std. 
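- Note: indices whose stddev is at or below ``std_threshold`` are drawn from a Gaussian approximation; the rest are drawn from the precomputed histogram.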
- """ - size = std_idx.size() - std_idx = std_idx.flatten() # (N,) - - # Samples from histogram - prob = self.Y[std_idx] # (N, n_bins) - bin_idx = torch.multinomial(prob[:, :-1], num_samples=1).squeeze(-1) # (N,) - bin_start = self.X[std_idx, bin_idx] # (N,) - bin_width = self.X[std_idx, bin_idx+1] - self.X[std_idx, bin_idx] - samples_hist = bin_start + torch.rand_like(bin_start) * bin_width # (N,) - - # Samples from Gaussian approximation - mean_gaussian = self.stddevs[std_idx]*2 - std_gaussian = self.stddevs[std_idx] - samples_gaussian = mean_gaussian + torch.randn_like(mean_gaussian) * std_gaussian - samples_gaussian = samples_gaussian.abs() % math.pi - - # Choose from histogram or Gaussian - gaussian_flag = self.approx_flag[std_idx] - samples = torch.where(gaussian_flag, samples_gaussian, samples_hist) - - return samples.reshape(size) - - -def random_normal_so3(std_idx, angular_distrib, device='cpu'): - size = std_idx.size() - u = F.normalize(torch.randn(list(size)+[3,], device=device), dim=-1) - theta = angular_distrib.sample(std_idx) - w = u * theta[..., None] - return w diff --git a/spaces/luost26/DiffAb/diffab/tools/eval/run.py b/spaces/luost26/DiffAb/diffab/tools/eval/run.py deleted file mode 100644 index f902e40d5e26adf53b6dde14ea0d94d77a12903f..0000000000000000000000000000000000000000 --- a/spaces/luost26/DiffAb/diffab/tools/eval/run.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import argparse -import ray -import shelve -import time -import pandas as pd -from typing import Mapping - -from tools.eval.base import EvalTask, TaskScanner -from tools.eval.similarity import eval_similarity -from tools.eval.energy import eval_interface_energy - - -@ray.remote(num_cpus=1) -def evaluate(task, args): - funcs = [] - funcs.append(eval_similarity) - if not args.no_energy: - funcs.append(eval_interface_energy) - for f in funcs: - task = f(task) - return task - - -def dump_db(db: Mapping[str, EvalTask], path): - table = [] - for task in db.values(): - if 'abopt' in path and task.scores['seqid'] >= 100.0: - # In abopt (Antibody Optimization) mode, ignore sequences identical to the wild-type - continue - table.append(task.to_report_dict()) - table = pd.DataFrame(table) - table.to_csv(path, index=False, float_format='%.6f') - return table - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--root', type=str, default='./results') - parser.add_argument('--pfx', type=str, default='rosetta') - parser.add_argument('--no_energy', action='store_true', default=False) - args = parser.parse_args() - ray.init() - - db_path = os.path.join(args.root, 'evaluation_db') - with shelve.open(db_path) as db: - scanner = TaskScanner(root=args.root, postfix=args.pfx, db=db) - - while True: - tasks = scanner.scan() - futures = [evaluate.remote(t, args) for t in tasks] - if len(futures) > 0: - print(f'Submitted {len(futures)} tasks.') - while len(futures) > 0: - done_ids, futures = ray.wait(futures, num_returns=1) - for done_id in done_ids: - done_task = ray.get(done_id) - done_task.save_to_db(db) - print(f'Remaining {len(futures)}. 
Finished {done_task.in_path}') - db.sync() - - dump_db(db, os.path.join(args.root, 'summary.csv')) - time.sleep(1.0) - -if __name__ == '__main__': - main() diff --git a/spaces/ma-xu/LIVE/pybind11/tests/test_chrono.py b/spaces/ma-xu/LIVE/pybind11/tests/test_chrono.py deleted file mode 100644 index 76783905a3bc9b60e5b58afdbdf592e88afb4f74..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pybind11/tests/test_chrono.py +++ /dev/null @@ -1,202 +0,0 @@ -# -*- coding: utf-8 -*- -from pybind11_tests import chrono as m -import datetime -import pytest - -import env # noqa: F401 - - -def test_chrono_system_clock(): - - # Get the time from both c++ and datetime - date0 = datetime.datetime.today() - date1 = m.test_chrono1() - date2 = datetime.datetime.today() - - # The returned value should be a datetime - assert isinstance(date1, datetime.datetime) - - # The numbers should vary by a very small amount (time it took to execute) - diff_python = abs(date2 - date0) - diff = abs(date1 - date2) - - # There should never be a days difference - assert diff.days == 0 - - # Since datetime.datetime.today() calls time.time(), and on some platforms - # that has 1 second accuracy, we compare this way - assert diff.seconds <= diff_python.seconds - - -def test_chrono_system_clock_roundtrip(): - date1 = datetime.datetime.today() - - # Roundtrip the time - date2 = m.test_chrono2(date1) - - # The returned value should be a datetime - assert isinstance(date2, datetime.datetime) - - # They should be identical (no information lost on roundtrip) - diff = abs(date1 - date2) - assert diff.days == 0 - assert diff.seconds == 0 - assert diff.microseconds == 0 - - -def test_chrono_system_clock_roundtrip_date(): - date1 = datetime.date.today() - - # Roundtrip the time - datetime2 = m.test_chrono2(date1) - date2 = datetime2.date() - time2 = datetime2.time() - - # The returned value should be a datetime - assert isinstance(datetime2, datetime.datetime) - assert isinstance(date2, datetime.date) - assert isinstance(time2, datetime.time) - - # They should be identical (no information lost on roundtrip) - diff = abs(date1 - date2) - assert diff.days == 0 - assert diff.seconds == 0 - assert diff.microseconds == 0 - - # Year, Month & Day should be the same after the round trip - assert date1.year == date2.year - assert date1.month == date2.month - assert date1.day == date2.day - - # There should be no time information - assert time2.hour == 0 - assert time2.minute == 0 - assert time2.second == 0 - assert time2.microsecond == 0 - - -SKIP_TZ_ENV_ON_WIN = pytest.mark.skipif( - "env.WIN", reason="TZ environment variable only supported on POSIX" -) - - -@pytest.mark.parametrize("time1", [ - datetime.datetime.today().time(), - datetime.time(0, 0, 0), - datetime.time(0, 0, 0, 1), - datetime.time(0, 28, 45, 109827), - datetime.time(0, 59, 59, 999999), - datetime.time(1, 0, 0), - datetime.time(5, 59, 59, 0), - datetime.time(5, 59, 59, 1), -]) -@pytest.mark.parametrize("tz", [ - None, - pytest.param("Europe/Brussels", marks=SKIP_TZ_ENV_ON_WIN), - pytest.param("Asia/Pyongyang", marks=SKIP_TZ_ENV_ON_WIN), - pytest.param("America/New_York", marks=SKIP_TZ_ENV_ON_WIN), -]) -def test_chrono_system_clock_roundtrip_time(time1, tz, monkeypatch): - if tz is not None: - monkeypatch.setenv("TZ", "/usr/share/zoneinfo/{}".format(tz)) - - # Roundtrip the time - datetime2 = m.test_chrono2(time1) - date2 = datetime2.date() - time2 = datetime2.time() - - # The returned value should be a datetime - assert isinstance(datetime2, datetime.datetime) - 
assert isinstance(date2, datetime.date) - assert isinstance(time2, datetime.time) - - # Hour, Minute, Second & Microsecond should be the same after the round trip - assert time1.hour == time2.hour - assert time1.minute == time2.minute - assert time1.second == time2.second - assert time1.microsecond == time2.microsecond - - # There should be no date information (i.e. date = python base date) - assert date2.year == 1970 - assert date2.month == 1 - assert date2.day == 1 - - -def test_chrono_duration_roundtrip(): - - # Get the difference between two times (a timedelta) - date1 = datetime.datetime.today() - date2 = datetime.datetime.today() - diff = date2 - date1 - - # Make sure this is a timedelta - assert isinstance(diff, datetime.timedelta) - - cpp_diff = m.test_chrono3(diff) - - assert cpp_diff.days == diff.days - assert cpp_diff.seconds == diff.seconds - assert cpp_diff.microseconds == diff.microseconds - - -def test_chrono_duration_subtraction_equivalence(): - - date1 = datetime.datetime.today() - date2 = datetime.datetime.today() - - diff = date2 - date1 - cpp_diff = m.test_chrono4(date2, date1) - - assert cpp_diff.days == diff.days - assert cpp_diff.seconds == diff.seconds - assert cpp_diff.microseconds == diff.microseconds - - -def test_chrono_duration_subtraction_equivalence_date(): - - date1 = datetime.date.today() - date2 = datetime.date.today() - - diff = date2 - date1 - cpp_diff = m.test_chrono4(date2, date1) - - assert cpp_diff.days == diff.days - assert cpp_diff.seconds == diff.seconds - assert cpp_diff.microseconds == diff.microseconds - - -def test_chrono_steady_clock(): - time1 = m.test_chrono5() - assert isinstance(time1, datetime.timedelta) - - -def test_chrono_steady_clock_roundtrip(): - time1 = datetime.timedelta(days=10, seconds=10, microseconds=100) - time2 = m.test_chrono6(time1) - - assert isinstance(time2, datetime.timedelta) - - # They should be identical (no information lost on roundtrip) - assert time1.days == time2.days - assert time1.seconds == time2.seconds - assert time1.microseconds == time2.microseconds - - -def test_floating_point_duration(): - # Test using a floating point number in seconds - time = m.test_chrono7(35.525123) - - assert isinstance(time, datetime.timedelta) - - assert time.seconds == 35 - assert 525122 <= time.microseconds <= 525123 - - diff = m.test_chrono_float_diff(43.789012, 1.123456) - assert diff.seconds == 42 - assert 665556 <= diff.microseconds <= 665557 - - -def test_nano_timepoint(): - time = datetime.datetime.now() - time1 = m.test_nano_timepoint(time, datetime.timedelta(seconds=60)) - assert(time1 == time + datetime.timedelta(seconds=60)) diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/memory_algorithms.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/memory_algorithms.h deleted file mode 100644 index ffa25aff8b564218dd43d1c8ac82b8b7d5962e10..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/detail/memory_algorithms.h +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) 2018 NVIDIA Corporation -// Author: Bryce Adelstein Lelbach -// -// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt) - -// TODO: These need to be turned into proper Thrust algorithms (dispatch layer, -// backends, etc). 
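- // (In spirit these roughly mirror the C++17 std::destroy_at / std::destroy / std::destroy_n and std::uninitialized_* algorithm families, with extra allocator-aware overloads.)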
- -#pragma once - -#include -#include -#include -#include -#include - -#include -#include -#include - -namespace thrust -{ - -/////////////////////////////////////////////////////////////////////////////// - -template -__host__ __device__ -void destroy_at(T* location) -{ - location->~T(); -} - -template -__host__ __device__ -void destroy_at(Allocator const& alloc, T* location) -{ - typedef typename detail::allocator_traits< - typename detail::remove_cv< - typename detail::remove_reference::type - >::type - >::template rebind_traits::other traits; - - typename traits::allocator_type alloc_T(alloc); - - traits::destroy(alloc_T, location); -} - -template -__host__ __device__ -ForwardIt destroy(ForwardIt first, ForwardIt last) -{ - for (; first != last; ++first) - destroy_at(addressof(*first)); - - return first; -} - -template -__host__ __device__ -ForwardIt destroy(Allocator const& alloc, ForwardIt first, ForwardIt last) -{ - typedef typename iterator_traits::value_type T; - typedef typename detail::allocator_traits< - typename detail::remove_cv< - typename detail::remove_reference::type - >::type - >::template rebind_traits::other traits; - - typename traits::allocator_type alloc_T(alloc); - - for (; first != last; ++first) - destroy_at(alloc_T, addressof(*first)); - - return first; -} - -template -__host__ __device__ -ForwardIt destroy_n(ForwardIt first, Size n) -{ - for (; n > 0; (void) ++first, --n) - destroy_at(addressof(*first)); - - return first; -} - -template -__host__ __device__ -ForwardIt destroy_n(Allocator const& alloc, ForwardIt first, Size n) -{ - typedef typename iterator_traits::value_type T; - typedef typename detail::allocator_traits< - typename detail::remove_cv< - typename detail::remove_reference::type - >::type - >::template rebind_traits::other traits; - - typename traits::allocator_type alloc_T(alloc); - - for (; n > 0; (void) ++first, --n) - destroy_at(alloc_T, addressof(*first)); - - return first; -} - -#if THRUST_CPP_DIALECT >= 2011 -template -__host__ __device__ -void uninitialized_construct( - ForwardIt first, ForwardIt last, Args const&... args -) -{ - using T = typename iterator_traits::value_type; - - ForwardIt current = first; - #if !__CUDA_ARCH__ // No exceptions in CUDA. - try { - #endif - for (; current != last; ++current) - ::new (static_cast(addressof(*current))) T(args...); - #if !__CUDA_ARCH__ // No exceptions in CUDA. - } catch (...) { - destroy(first, current); - throw; - } - #endif -} - -template -void uninitialized_construct_with_allocator( - Allocator const& alloc, ForwardIt first, ForwardIt last, Args const&... args -) -{ - using T = typename iterator_traits::value_type; - using traits = typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >::template rebind_traits; - - typename traits::allocator_type alloc_T(alloc); - - ForwardIt current = first; - #if !__CUDA_ARCH__ // No exceptions in CUDA. - try { - #endif - for (; current != last; ++current) - traits::construct(alloc_T, addressof(*current), args...); - #if !__CUDA_ARCH__ // No exceptions in CUDA. - } catch (...) { - destroy(alloc_T, first, current); - throw; - } - #endif -} - -template -void uninitialized_construct_n( - ForwardIt first, Size n, Args const&... args -) -{ - using T = typename iterator_traits::value_type; - - ForwardIt current = first; - #if !__CUDA_ARCH__ // No exceptions in CUDA. 
- try { - #endif - for (; n > 0; (void) ++current, --n) - ::new (static_cast(addressof(*current))) T(args...); - #if !__CUDA_ARCH__ // No exceptions in CUDA. - } catch (...) { - destroy(first, current); - throw; - } - #endif -} - -template -void uninitialized_construct_n_with_allocator( - Allocator const& alloc, ForwardIt first, Size n, Args const&... args -) -{ - using T = typename iterator_traits::value_type; - using traits = typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >::template rebind_traits; - - typename traits::allocator_type alloc_T(alloc); - - ForwardIt current = first; - #if !__CUDA_ARCH__ // No exceptions in CUDA. - try { - #endif - for (; n > 0; (void) ++current, --n) - traits::construct(alloc_T, addressof(*current), args...); - #if !__CUDA_ARCH__ // No exceptions in CUDA. - } catch (...) { - destroy(alloc_T, first, current); - throw; - } - #endif -} -#endif - -/////////////////////////////////////////////////////////////////////////////// - -} // end namespace thrust - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/static_assert.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/static_assert.h deleted file mode 100644 index 52674dcaf18ef6459b6ef826a524623162ce0f23..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/detail/static_assert.h +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2008-2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * (C) Copyright John Maddock 2000. - * - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying NOTICE file for the complete license) - * - * For more information, see http://www.boost.org - */ - -#pragma once - -#include -#include -#include - -namespace thrust -{ - -namespace detail -{ - -template -struct depend_on_instantiation -{ - THRUST_INLINE_INTEGRAL_MEMBER_CONSTANT bool value = x; -}; - -#if THRUST_CPP_DIALECT >= 2011 - -# if THRUST_CPP_DIALECT >= 2017 -# define THRUST_STATIC_ASSERT(B) static_assert(B) -# else -# define THRUST_STATIC_ASSERT(B) static_assert(B, "static assertion failed") -# endif -# define THRUST_STATIC_ASSERT_MSG(B, msg) static_assert(B, msg) - -#else // Older than C++11. - -// HP aCC cannot deal with missing names for template value parameters. -template struct STATIC_ASSERTION_FAILURE; - -template <> struct STATIC_ASSERTION_FAILURE {}; - -// HP aCC cannot deal with missing names for template value parameters. -template struct static_assert_test {}; - -#if ( (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) \ - && (THRUST_GCC_VERSION >= 40800)) \ - || (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG) - // Clang and GCC 4.8+ will complain about this typedef being unused unless we - // annotate it as such. 
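- // (The underlying trick: only STATIC_ASSERTION_FAILURE<true> is defined, so taking sizeof of STATIC_ASSERTION_FAILURE<(bool)(B)> compiles only when B is true.)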
-# define THRUST_STATIC_ASSERT(B) \ - typedef ::thrust::detail::static_assert_test< \ - sizeof(::thrust::detail::STATIC_ASSERTION_FAILURE<(bool)(B)>) \ - > \ - THRUST_PP_CAT2(thrust_static_assert_typedef_, __LINE__) \ - __attribute__((unused)) \ - /**/ -#else -# define THRUST_STATIC_ASSERT(B) \ - typedef ::thrust::detail::static_assert_test< \ - sizeof(::thrust::detail::STATIC_ASSERTION_FAILURE<(bool)(B)>) \ - > \ - THRUST_PP_CAT2(thrust_static_assert_typedef_, __LINE__) \ - /**/ -#endif - -#define THRUST_STATIC_ASSERT_MSG(B, msg) THRUST_STATIC_ASSERT(B) - -#endif // THRUST_CPP_DIALECT >= 2011 - -} // namespace detail - -} // end namespace thrust - - diff --git a/spaces/maiti/stable-fashion/main.py b/spaces/maiti/stable-fashion/main.py deleted file mode 100644 index a0ab2d2db00c0079bc9cc75932708abf2e268359..0000000000000000000000000000000000000000 --- a/spaces/maiti/stable-fashion/main.py +++ /dev/null @@ -1,112 +0,0 @@ -from diffusers import StableDiffusionInpaintPipeline -import os - -from tqdm import tqdm -from PIL import Image -import numpy as np -import cv2 -import warnings - -warnings.filterwarnings("ignore", category=FutureWarning) -warnings.filterwarnings("ignore", category=DeprecationWarning) - -import torch -import torch.nn.functional as F -import torchvision.transforms as transforms - -from data.base_dataset import Normalize_image -from utils.saving_utils import load_checkpoint_mgpu -from networks import U2NET -import argparse -from enum import Enum -from rembg import remove - -class Parts: - UPPER = 1 - LOWER = 2 - -def parse_arguments(): - parser = argparse.ArgumentParser( - description="Stable Fashion API, allows you to picture yourself in any cloth your imagination can think of!" - ) - parser.add_argument('--image', type=str, required=True, help='path to image') - parser.add_argument('--part', choices=['upper', 'lower'], default='upper', type=str) - parser.add_argument('--resolution', choices=[256, 512, 1024, 2048], default=256, type=int) - parser.add_argument('--prompt', type=str, default="A pink cloth") - parser.add_argument('--num_steps', type=int, default=5) - parser.add_argument('--guidance_scale', type=float, default=7.5) - parser.add_argument('--rembg', action='store_true') - parser.add_argument('--output', default='output.jpg', type=str) - args, _ = parser.parse_known_args() - return args - - -def load_u2net(): - device = "cuda" if torch.cuda.is_available() else "cpu" - checkpoint_path = os.path.join("trained_checkpoint", "cloth_segm_u2net_latest.pth") - net = U2NET(in_ch=3, out_ch=4) - net = load_checkpoint_mgpu(net, checkpoint_path) - net = net.to(device) - net = net.eval() - return net - -def change_bg_color(rgba_image, color): - new_image = Image.new("RGBA", rgba_image.size, color) - new_image.paste(rgba_image, (0, 0), rgba_image) - return new_image.convert("RGB") - - -def load_inpainting_pipeline(): - device = "cuda" if torch.cuda.is_available() else "cpu" - inpainting_pipeline = StableDiffusionInpaintPipeline.from_pretrained( - "runwayml/stable-diffusion-inpainting", - revision="fp16", - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - ).to(device) - return inpainting_pipeline -def process_image(args, inpainting_pipeline, net): - device = "cuda" if torch.cuda.is_available() else "cpu" - image_path = args.image - transforms_list = [] - transforms_list += [transforms.ToTensor()] - transforms_list += [Normalize_image(0.5, 0.5)] - transform_rgb = transforms.Compose(transforms_list) - img = Image.open(image_path) - img = 
img.convert("RGB") - img = img.resize((args.resolution, args.resolution)) - if args.rembg: - img_with_green_bg = remove(img) - img_with_green_bg = change_bg_color(img_with_green_bg, color="GREEN") - img_with_green_bg = img_with_green_bg.convert("RGB") - else: - img_with_green_bg = img - image_tensor = transform_rgb(img_with_green_bg) - image_tensor = image_tensor.unsqueeze(0) - output_tensor = net(image_tensor.to(device)) - output_tensor = F.log_softmax(output_tensor[0], dim=1) - output_tensor = torch.max(output_tensor, dim=1, keepdim=True)[1] - output_tensor = torch.squeeze(output_tensor, dim=0) - output_tensor = torch.squeeze(output_tensor, dim=0) - output_arr = output_tensor.cpu().numpy() - mask_code = eval(f"Parts.{args.part.upper()}") - mask = (output_arr == mask_code) - output_arr[mask] = 1 - output_arr[~mask] = 0 - output_arr *= 255 - mask_PIL = Image.fromarray(output_arr.astype("uint8"), mode="L") - clothed_image_from_pipeline = inpainting_pipeline(prompt=args.prompt, - image=img_with_green_bg, - mask_image=mask_PIL, - width=args.resolution, - height=args.resolution, - guidance_scale=args.guidance_scale, - num_inference_steps=args.num_steps).images[0] - clothed_image_from_pipeline = remove(clothed_image_from_pipeline) - clothed_image_from_pipeline = change_bg_color(clothed_image_from_pipeline, "WHITE") - return clothed_image_from_pipeline.convert("RGB") -if __name__ == '__main__': - args = parse_arguments() - net = load_u2net() - inpainting_pipeline = load_inpainting_pipeline() - result_image = process_image(args, inpainting_pipeline, net) - result_image.save(args.output) diff --git a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/utils/file_client.py b/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/utils/file_client.py deleted file mode 100644 index 89d83ab9e0d4314f8cdf2393908a561c6d1dca92..0000000000000000000000000000000000000000 --- a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/utils/file_client.py +++ /dev/null @@ -1,167 +0,0 @@ -# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py # noqa: E501 -from abc import ABCMeta, abstractmethod - - -class BaseStorageBackend(metaclass=ABCMeta): - """Abstract class of storage backends. - - All backends need to implement two apis: ``get()`` and ``get_text()``. - ``get()`` reads the file as a byte stream and ``get_text()`` reads the file - as texts. - """ - - @abstractmethod - def get(self, filepath): - pass - - @abstractmethod - def get_text(self, filepath): - pass - - -class MemcachedBackend(BaseStorageBackend): - """Memcached storage backend. - - Attributes: - server_list_cfg (str): Config file for memcached server list. - client_cfg (str): Config file for memcached client. - sys_path (str | None): Additional path to be appended to `sys.path`. - Default: None. 
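- 
- Example (config paths are hypothetical):
- >>> backend = MemcachedBackend('/etc/mc/server_list.conf', '/etc/mc/client.conf')
- >>> value_buf = backend.get('some/key')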
- """ - - def __init__(self, server_list_cfg, client_cfg, sys_path=None): - if sys_path is not None: - import sys - sys.path.append(sys_path) - try: - import mc - except ImportError: - raise ImportError('Please install memcached to enable MemcachedBackend.') - - self.server_list_cfg = server_list_cfg - self.client_cfg = client_cfg - self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, self.client_cfg) - # mc.pyvector servers as a point which points to a memory cache - self._mc_buffer = mc.pyvector() - - def get(self, filepath): - filepath = str(filepath) - import mc - self._client.Get(filepath, self._mc_buffer) - value_buf = mc.ConvertBuffer(self._mc_buffer) - return value_buf - - def get_text(self, filepath): - raise NotImplementedError - - -class HardDiskBackend(BaseStorageBackend): - """Raw hard disks storage backend.""" - - def get(self, filepath): - filepath = str(filepath) - with open(filepath, 'rb') as f: - value_buf = f.read() - return value_buf - - def get_text(self, filepath): - filepath = str(filepath) - with open(filepath, 'r') as f: - value_buf = f.read() - return value_buf - - -class LmdbBackend(BaseStorageBackend): - """Lmdb storage backend. - - Args: - db_paths (str | list[str]): Lmdb database paths. - client_keys (str | list[str]): Lmdb client keys. Default: 'default'. - readonly (bool, optional): Lmdb environment parameter. If True, - disallow any write operations. Default: True. - lock (bool, optional): Lmdb environment parameter. If False, when - concurrent access occurs, do not lock the database. Default: False. - readahead (bool, optional): Lmdb environment parameter. If False, - disable the OS filesystem readahead mechanism, which may improve - random read performance when a database is larger than RAM. - Default: False. - - Attributes: - db_paths (list): Lmdb database path. - _client (list): A list of several lmdb envs. - """ - - def __init__(self, db_paths, client_keys='default', readonly=True, lock=False, readahead=False, **kwargs): - try: - import lmdb - except ImportError: - raise ImportError('Please install lmdb to enable LmdbBackend.') - - if isinstance(client_keys, str): - client_keys = [client_keys] - - if isinstance(db_paths, list): - self.db_paths = [str(v) for v in db_paths] - elif isinstance(db_paths, str): - self.db_paths = [str(db_paths)] - assert len(client_keys) == len(self.db_paths), ('client_keys and db_paths should have the same length, ' - f'but received {len(client_keys)} and {len(self.db_paths)}.') - - self._client = {} - for client, path in zip(client_keys, self.db_paths): - self._client[client] = lmdb.open(path, readonly=readonly, lock=lock, readahead=readahead, **kwargs) - - def get(self, filepath, client_key): - """Get values according to the filepath from one lmdb named client_key. - - Args: - filepath (str | obj:`Path`): Here, filepath is the lmdb key. - client_key (str): Used for distinguishing different lmdb envs. - """ - filepath = str(filepath) - assert client_key in self._client, (f'client_key {client_key} is not in lmdb clients.') - client = self._client[client_key] - with client.begin(write=False) as txn: - value_buf = txn.get(filepath.encode('ascii')) - return value_buf - - def get_text(self, filepath): - raise NotImplementedError - - -class FileClient(object): - """A general file client to access files in different backend. - - The client loads a file or text in a specified backend from its path - and return it as a binary file. it can also register other backend - accessor with a given name and backend class. 
- - Attributes: - backend (str): The storage backend type. Options are "disk", - "memcached" and "lmdb". - client (:obj:`BaseStorageBackend`): The backend object. - """ - - _backends = { - 'disk': HardDiskBackend, - 'memcached': MemcachedBackend, - 'lmdb': LmdbBackend, - } - - def __init__(self, backend='disk', **kwargs): - if backend not in self._backends: - raise ValueError(f'Backend {backend} is not supported. Currently supported ones' - f' are {list(self._backends.keys())}') - self.backend = backend - self.client = self._backends[backend](**kwargs) - - def get(self, filepath, client_key='default'): - # client_key is used only for lmdb, where different fileclients have - # different lmdb environments. - if self.backend == 'lmdb': - return self.client.get(filepath, client_key) - else: - return self.client.get(filepath) - - def get_text(self, filepath): - return self.client.get_text(filepath) diff --git a/spaces/marioboy/neil-breen/synthesizer/audio.py b/spaces/marioboy/neil-breen/synthesizer/audio.py deleted file mode 100644 index 83dc96c63c962bc8e13c446d05e27c009fb3239f..0000000000000000000000000000000000000000 --- a/spaces/marioboy/neil-breen/synthesizer/audio.py +++ /dev/null @@ -1,206 +0,0 @@ -import librosa -import librosa.filters -import numpy as np -from scipy import signal -from scipy.io import wavfile -import soundfile as sf - - -def load_wav(path, sr): - return librosa.core.load(path, sr=sr)[0] - -def save_wav(wav, path, sr): - wav *= 32767 / max(0.01, np.max(np.abs(wav))) - #proposed by @dsmiller - wavfile.write(path, sr, wav.astype(np.int16)) - -def save_wavenet_wav(wav, path, sr): - sf.write(path, wav.astype(np.float32), sr) - -def preemphasis(wav, k, preemphasize=True): - if preemphasize: - return signal.lfilter([1, -k], [1], wav) - return wav - -def inv_preemphasis(wav, k, inv_preemphasize=True): - if inv_preemphasize: - return signal.lfilter([1], [1, -k], wav) - return wav - -#From https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py -def start_and_end_indices(quantized, silence_threshold=2): - for start in range(quantized.size): - if abs(quantized[start] - 127) > silence_threshold: - break - for end in range(quantized.size - 1, 1, -1): - if abs(quantized[end] - 127) > silence_threshold: - break - - assert abs(quantized[start] - 127) > silence_threshold - assert abs(quantized[end] - 127) > silence_threshold - - return start, end - -def get_hop_size(hparams): - hop_size = hparams.hop_size - if hop_size is None: - assert hparams.frame_shift_ms is not None - hop_size = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate) - return hop_size - -def linearspectrogram(wav, hparams): - D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams) - S = _amp_to_db(np.abs(D), hparams) - hparams.ref_level_db - - if hparams.signal_normalization: - return _normalize(S, hparams) - return S - -def melspectrogram(wav, hparams): - D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams) - S = _amp_to_db(_linear_to_mel(np.abs(D), hparams), hparams) - hparams.ref_level_db - - if hparams.signal_normalization: - return _normalize(S, hparams) - return S - -def inv_linear_spectrogram(linear_spectrogram, hparams): - """Converts linear spectrogram to waveform using librosa""" - if hparams.signal_normalization: - D = _denormalize(linear_spectrogram, hparams) - else: - D = linear_spectrogram - - S = _db_to_amp(D + hparams.ref_level_db) #Convert back to linear - - if hparams.use_lws: - processor = _lws_processor(hparams) - D = 
processor.run_lws(S.astype(np.float64).T ** hparams.power) - y = processor.istft(D).astype(np.float32) - return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize) - else: - return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize) - -def inv_mel_spectrogram(mel_spectrogram, hparams): - """Converts mel spectrogram to waveform using librosa""" - if hparams.signal_normalization: - D = _denormalize(mel_spectrogram, hparams) - else: - D = mel_spectrogram - - S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db), hparams) # Convert back to linear - - if hparams.use_lws: - processor = _lws_processor(hparams) - D = processor.run_lws(S.astype(np.float64).T ** hparams.power) - y = processor.istft(D).astype(np.float32) - return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize) - else: - return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize) - -def _lws_processor(hparams): - import lws - return lws.lws(hparams.n_fft, get_hop_size(hparams), fftsize=hparams.win_size, mode="speech") - -def _griffin_lim(S, hparams): - """librosa implementation of Griffin-Lim - Based on https://github.com/librosa/librosa/issues/434 - """ - angles = np.exp(2j * np.pi * np.random.rand(*S.shape)) - S_complex = np.abs(S).astype(np.complex) - y = _istft(S_complex * angles, hparams) - for i in range(hparams.griffin_lim_iters): - angles = np.exp(1j * np.angle(_stft(y, hparams))) - y = _istft(S_complex * angles, hparams) - return y - -def _stft(y, hparams): - if hparams.use_lws: - return _lws_processor(hparams).stft(y).T - else: - return librosa.stft(y=y, n_fft=hparams.n_fft, hop_length=get_hop_size(hparams), win_length=hparams.win_size) - -def _istft(y, hparams): - return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams.win_size) - -########################################################## -#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!) 
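- # (lws expects (fsize - fshift) samples of padding on each side of the signal; num_frames and pad_lr below compute the matching frame count and left/right padding, while librosa_pad_lr further down handles librosa's convention.)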
-def num_frames(length, fsize, fshift): - """Compute number of time frames of spectrogram - """ - pad = (fsize - fshift) - if length % fshift == 0: - M = (length + pad * 2 - fsize) // fshift + 1 - else: - M = (length + pad * 2 - fsize) // fshift + 2 - return M - - -def pad_lr(x, fsize, fshift): - """Compute left and right padding - """ - M = num_frames(len(x), fsize, fshift) - pad = (fsize - fshift) - T = len(x) + 2 * pad - r = (M - 1) * fshift + fsize - T - return pad, pad + r -########################################################## -#Librosa correct padding -def librosa_pad_lr(x, fsize, fshift): - return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0] - -# Conversions -_mel_basis = None -_inv_mel_basis = None - -def _linear_to_mel(spectogram, hparams): - global _mel_basis - if _mel_basis is None: - _mel_basis = _build_mel_basis(hparams) - return np.dot(_mel_basis, spectogram) - -def _mel_to_linear(mel_spectrogram, hparams): - global _inv_mel_basis - if _inv_mel_basis is None: - _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams)) - return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram)) - -def _build_mel_basis(hparams): - assert hparams.fmax <= hparams.sample_rate // 2 - return librosa.filters.mel(hparams.sample_rate, hparams.n_fft, n_mels=hparams.num_mels, - fmin=hparams.fmin, fmax=hparams.fmax) - -def _amp_to_db(x, hparams): - min_level = np.exp(hparams.min_level_db / 20 * np.log(10)) - return 20 * np.log10(np.maximum(min_level, x)) - -def _db_to_amp(x): - return np.power(10.0, (x) * 0.05) - -def _normalize(S, hparams): - if hparams.allow_clipping_in_normalization: - if hparams.symmetric_mels: - return np.clip((2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value, - -hparams.max_abs_value, hparams.max_abs_value) - else: - return np.clip(hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)), 0, hparams.max_abs_value) - - assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0 - if hparams.symmetric_mels: - return (2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value - else: - return hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - -def _denormalize(D, hparams): - if hparams.allow_clipping_in_normalization: - if hparams.symmetric_mels: - return (((np.clip(D, -hparams.max_abs_value, - hparams.max_abs_value) + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) - + hparams.min_level_db) - else: - return ((np.clip(D, 0, hparams.max_abs_value) * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db) - - if hparams.symmetric_mels: - return (((D + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) + hparams.min_level_db) - else: - return ((D * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db) diff --git a/spaces/marlenezw/audio-driven-animations/MakeItTalk/src/autovc/retrain_version/vocoder_spec/utils.py b/spaces/marlenezw/audio-driven-animations/MakeItTalk/src/autovc/retrain_version/vocoder_spec/utils.py deleted file mode 100644 index fea6e8ab01d15ea1cb3a70a6aed43527675d30a1..0000000000000000000000000000000000000000 --- a/spaces/marlenezw/audio-driven-animations/MakeItTalk/src/autovc/retrain_version/vocoder_spec/utils.py +++ /dev/null @@ -1,263 +0,0 @@ -import os - -def _get_padding_conv2d(input_size, output_size, kernel_size, stride, dilation=[1,1]): - Pr = 
(output_size[0]-1)*stride[0]+(kernel_size[0]-1)*dilation[0]+1-input_size[0] - Pc = (output_size[1]-1)*stride[1]+(kernel_size[1]-1)*dilation[1]+1-input_size[1] - padding_h = (Pr/2, Pr-Pr/2) - padding_w = (Pc/2, Pc-Pc/2) - print(padding_h, padding_w) - - -def _get_padding_deconv2d(input_size, output_size, kernel_size, stride): - padding_h = (input_size[0]-1)*stride[0]+kernel_size[0]-output_size[0] - padding_w = (input_size[1]-1)*stride[1]+kernel_size[1]-output_size[1] - print(padding_h/2, padding_w/2) - - -def _conv2d_simulator(input_dim, kernel_size, stride, padding, dilation=[1,1]): - h_out = (input_dim[0]+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0] + 1 - w_out = (input_dim[1]+2*padding[1]-dilation[1]*(kernel_size[1]-1)-1)/stride[1] + 1 - print('Floor of:', h_out, w_out) - - -def _deconv2d_simulator(input_dim, kernel_size, stride, padding, dilation=[1,1]): - h_out = (input_dim[0]-1)*stride[0]-2*padding[0]+kernel_size[0] - w_out = (input_dim[1]-1)*stride[1]-2*padding[1]+kernel_size[1] - print(h_out, w_out) - - - -import numpy as np -import librosa -import pysptk -from scipy import signal -import pyworld as pw -import copy -import pdb - -def sptk_left_signal_padding(x, count): - x = np.pad(x, (count,0), 'constant', constant_values=(0, 0)) - return x - -def sptk_frame_zero_padding(x, winsz): - x = np.pad(x, ((0,0),(winsz//2,winsz//2)), 'constant', constant_values=(0, 0)) - return x - -def sptk_signal_padding(x, count): - x = np.pad(x, (count,count), 'constant', constant_values=(0, 0)) - return x - -def sptk_window(x, framesz, hopsz, winsz=None, windowing=None, normalize=False): - x = librosa.util.frame(sptk_signal_padding(x, framesz//2), frame_length=framesz, hop_length=hopsz) - if windowing is not None: - win = pysptk.blackman(framesz) - x = x.T * win - else: - x = x.T - if winsz is not None and winsz != framesz: - x = sptk_frame_zero_padding(x, winsz-framesz) - if normalize: - x = x / np.sqrt(np.expand_dims(sum(x**2, 1), 1) + 1e-16) - return x - -def hz2alpha(hz): - alpha = 0.313 * np.log10(hz) + (-0.903) - alpha = np.round(alpha*100) / 100.0 - return alpha - -def sptk_mcep(x, order, winsz, hopsz, fftsz, fs, window_norm=False, noise_floor=1e-8): - alpha = hz2alpha(fs) - windowed = sptk_window(x, winsz, hopsz, fftsz, windowing='blackman', normalize=window_norm) - cep = pysptk.mcep(windowed, order=order, alpha=alpha, miniter=2, maxiter=30, - threshold=0.001, etype=1, eps=noise_floor, min_det=1.0e-6, itype=0) - return cep, alpha - - - -def my_world(x, fs, fft_size=1024, hopsz=256, lo=50, hi=550): - frame_period = hopsz / float(fs) * 1000 - _f0, t = pw.harvest(x, fs, frame_period=frame_period, f0_floor=lo, f0_ceil=hi) - f0 = pw.stonemask(x, _f0, t, fs) - sp = pw.cheaptrick(x, f0, t, fs, fft_size=fft_size, f0_floor=lo) - ap = pw.d4c(x, f0, t, fs, fft_size=fft_size) - assert x.shape[0] >= (sp.shape[0]-1) * hopsz - sig = x[:(sp.shape[0]-1) * hopsz] - assert sig.shape[0] % hopsz == 0 - return f0[:-1], sp[:-1,:], ap[:-1,:], sig - - - -def global_normalization(x, lo, hi): - # normalize logf0 to [0,1] - x = x.astype(float).copy() - uv = x==0 - x[~uv] = (x[~uv] - np.log(lo)) / (np.log(hi)-np.log(lo)) - x = np.clip(x, 0, 1) - return x - - -def speaker_normalization(f0, index_nonzero, mean_f0, std_f0): - # f0 is logf0 - f0 = f0.astype(float).copy() - #index_nonzero = f0 != 0 - f0[index_nonzero] = (f0[index_nonzero] - mean_f0) / std_f0 / 4.0 - f0[index_nonzero] = np.clip(f0[index_nonzero], -1, 1) - f0[index_nonzero] = (f0[index_nonzero] + 1) / 2.0 - return f0 - - -def 
speaker_normalization_tweak(f0, mean_f0, std_f0, mean_f0_trg, std_f0_trg): - # f0 is logf0 - f0 = f0.astype(float).copy() - index_nonzero = f0 != 0 - delta = (mean_f0_trg - mean_f0) * 0.1 - f0[index_nonzero] = (f0[index_nonzero] - mean_f0 + delta) / std_f0 / 4.0 - f0 = np.clip(f0, -1, 1) - f0[index_nonzero] = (f0[index_nonzero] + 1) / 2.0 - return f0 - - -def quantize_f0(x, num_bins=256): - # x is logf0 - assert x.ndim==1 - x = x.astype(float).copy() - assert (x >= 0).all() and (x <= 1).all() - uv = x==0 - x = np.round(x * (num_bins-1)) - x = x + 1 - x[uv] = 0 - enc = np.zeros((len(x), num_bins+1), dtype=np.float32) - enc[np.arange(len(x)), x.astype(np.int32)] = 1.0 - return enc - - -def quantize_f0_interp(x, num_bins=256): - # x is logf0 - assert x.ndim==1 - x = x.astype(float).copy() - uv = (x<0) - x[uv] = 0.0 - assert (x >= 0).all() and (x <= 1).all() - x = np.round(x * (num_bins-1)) - x = x + 1 - x[uv] = 0.0 - enc = np.zeros((len(x), num_bins+1), dtype=np.float32) - enc[np.arange(len(x)), x.astype(np.int32)] = 1.0 - return enc - - -def quantize_chroma(x, lo=50, hi=400, num_bins=120): - # x is f0 in Hz - assert x.ndim==1 - x = x.astype(float).copy() - uv = x==0 - x[~uv] = np.clip(x[~uv], lo/2, hi*2) - # convert to chroma f0 - x[~uv] = (np.log2(x[~uv] / 440) * 12 + 57) % 12 - # xs ~ [0,12) - x = np.floor(x / 12 * num_bins) - x = x + 1 - x[uv] = 0 - enc = np.zeros((len(x), num_bins+1), dtype=np.float32) - enc[np.arange(len(x)), x.astype(np.int32)] += 1.0 - - return enc - - - -def quantize_f0s(xs, lo=50, hi=400, num_bins=256): - # xs is logf0 - xs = copy.copy(xs) - uv = xs==0 - xs[~uv] = (xs[~uv] - np.log(lo)) / (np.log(hi)-np.log(lo)) - xs = np.clip(xs, 0, 1) - # xs ~ [0,1] - xs = np.round(xs * (num_bins-1)) - xs = xs + 1 - xs[uv] = 0 - enc = np.zeros((xs.shape[1], num_bins+1), dtype=np.float32) - for i in range(xs.shape[0]): - enc[np.arange(xs.shape[1]), xs[i].astype(np.int32)] += 1.0 - enc /= enc.sum(axis=1, keepdims=True) - return enc - - - - -def butter_highpass(cutoff, fs, order=5): - nyq = 0.5 * fs - normal_cutoff = cutoff / nyq - b, a = signal.butter(order, normal_cutoff, btype='high', analog=False) - return b, a - -def write_metadata(metadata, out_dir, sr=16000): - with open(os.path.join(out_dir, 'train.txt'), 'w', encoding='utf-8') as f: - for m in metadata: - f.write('|'.join([str(x) for x in m]) + '\n') - frames = sum([m[2] for m in metadata]) - hours = frames / sr / 3600 - print('Wrote %d utterances, %d time steps (%.2f hours)' % (len(metadata), frames, hours)) - - -def world_dio(x, fs, fft_size=1024, hopsz=256, lo=50, hi=550, thr=0.1): - frame_period = hopsz / float(fs) * 1000 - _f0, t = pw.dio(x, fs, frame_period=frame_period, f0_floor=lo, f0_ceil=hi, allowed_range=thr) - f0 = pw.stonemask(x, _f0, t, fs) - f0[f0!=0] = np.log(f0[f0!=0]) - return f0 - - -def world_harvest(x, fs, fft_size=1024, hopsz=256, lo=50, hi=550): - frame_period = hopsz / float(fs) * 1000 - _f0, t = pw.harvest(x, fs, frame_period=frame_period, f0_floor=lo, f0_ceil=hi) - f0 = pw.stonemask(x, _f0, t, fs) - f0[f0!=0] = np.log(f0[f0!=0]) - return f0 - -import torch -def get_mask_from_lengths(lengths, max_len): - ids = torch.arange(0, max_len, device=lengths.device) - mask = (ids >= lengths.unsqueeze(1)).byte() - return mask - - -def pad_sequence_cnn(sequences, padding_value=0): - - # assuming trailing dimensions and type of all the Tensors - # in sequences are same and fetching those from sequences[0] - max_size = sequences[0].size() - channel_dim = max_size[0] - max_len = max([s.size(-1) for s in 
sequences])
-
-    out_dims = (len(sequences), channel_dim, max_len)
-
-    out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
-    for i, tensor in enumerate(sequences):
-        length = tensor.size(-1)
-        # use index notation to prevent duplicate references to the tensor
-        out_tensor[i, :, :length] = tensor
-
-    return out_tensor
-
-
-
-def interp_vector(vec, t_new):
-    t = np.arange(vec.shape[0])
-    out = np.zeros_like(vec)
-    for j in range(vec.shape[1]):
-        out[:,j] = np.interp(t_new, t, vec[:,j], left=np.nan, right=np.nan)
-    assert not np.isnan(out).any()
-    return out
-
-
-
-from scipy.interpolate import interp1d
-
-def interp_vector_scipy(vec, t_new):
-    t = np.arange(vec.shape[0])
-    f_interp = interp1d(t, vec, axis=0, bounds_error=True, assume_sorted=True)
-    out = f_interp(t_new)
-    return out.astype(np.float32)
-
-
-
\ No newline at end of file
diff --git a/spaces/maxmon/auto_anno/utils/format/txt_2_list.py b/spaces/maxmon/auto_anno/utils/format/txt_2_list.py
deleted file mode 100644
index 1194ea2c603914dfa85f35dd6c584cd4f64845ac..0000000000000000000000000000000000000000
--- a/spaces/maxmon/auto_anno/utils/format/txt_2_list.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import re
-
-def txt_2_list(txt):
-    split_token = r'[ ,、,;;《》<>]'
-    rm_token = r'["\'”“‘’。.!!?? 【】\[\]]'
-
-    arr = re.split(split_token, txt)
-    arr = [re.sub(rm_token, '', item) for item in arr if item != '']
-    # sort from longest to shortest
-    arr.sort(key=lambda x: len(x), reverse=True)
-    return arr
diff --git a/spaces/mcqueenfu/johnslegers-epic-diffusion/app.py b/spaces/mcqueenfu/johnslegers-epic-diffusion/app.py
deleted file mode 100644
index 12d56ba46c931d370b152fa49983e79c77381a64..0000000000000000000000000000000000000000
--- a/spaces/mcqueenfu/johnslegers-epic-diffusion/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/johnslegers/epic-diffusion").launch()
\ No newline at end of file
diff --git a/spaces/merve/Grounding_DINO_demo/groundingdino/util/box_ops.py b/spaces/merve/Grounding_DINO_demo/groundingdino/util/box_ops.py
deleted file mode 100644
index 781068d294e576954edb4bd07b6e0f30e4e1bcd9..0000000000000000000000000000000000000000
--- a/spaces/merve/Grounding_DINO_demo/groundingdino/util/box_ops.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-"""
-Utilities for bounding box manipulation and GIoU.
-""" -import torch -from torchvision.ops.boxes import box_area - - -def box_cxcywh_to_xyxy(x): - x_c, y_c, w, h = x.unbind(-1) - b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] - return torch.stack(b, dim=-1) - - -def box_xyxy_to_cxcywh(x): - x0, y0, x1, y1 = x.unbind(-1) - b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] - return torch.stack(b, dim=-1) - - -# modified from torchvision to also return the union -def box_iou(boxes1, boxes2): - area1 = box_area(boxes1) - area2 = box_area(boxes2) - - # import ipdb; ipdb.set_trace() - lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] - rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] - - wh = (rb - lt).clamp(min=0) # [N,M,2] - inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] - - union = area1[:, None] + area2 - inter - - iou = inter / (union + 1e-6) - return iou, union - - -def generalized_box_iou(boxes1, boxes2): - """ - Generalized IoU from https://giou.stanford.edu/ - - The boxes should be in [x0, y0, x1, y1] format - - Returns a [N, M] pairwise matrix, where N = len(boxes1) - and M = len(boxes2) - """ - # degenerate boxes gives inf / nan results - # so do an early check - assert (boxes1[:, 2:] >= boxes1[:, :2]).all() - assert (boxes2[:, 2:] >= boxes2[:, :2]).all() - # except: - # import ipdb; ipdb.set_trace() - iou, union = box_iou(boxes1, boxes2) - - lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) - rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) - - wh = (rb - lt).clamp(min=0) # [N,M,2] - area = wh[:, :, 0] * wh[:, :, 1] - - return iou - (area - union) / (area + 1e-6) - - -# modified from torchvision to also return the union -def box_iou_pairwise(boxes1, boxes2): - area1 = box_area(boxes1) - area2 = box_area(boxes2) - - lt = torch.max(boxes1[:, :2], boxes2[:, :2]) # [N,2] - rb = torch.min(boxes1[:, 2:], boxes2[:, 2:]) # [N,2] - - wh = (rb - lt).clamp(min=0) # [N,2] - inter = wh[:, 0] * wh[:, 1] # [N] - - union = area1 + area2 - inter - - iou = inter / union - return iou, union - - -def generalized_box_iou_pairwise(boxes1, boxes2): - """ - Generalized IoU from https://giou.stanford.edu/ - - Input: - - boxes1, boxes2: N,4 - Output: - - giou: N, 4 - """ - # degenerate boxes gives inf / nan results - # so do an early check - assert (boxes1[:, 2:] >= boxes1[:, :2]).all() - assert (boxes2[:, 2:] >= boxes2[:, :2]).all() - assert boxes1.shape == boxes2.shape - iou, union = box_iou_pairwise(boxes1, boxes2) # N, 4 - - lt = torch.min(boxes1[:, :2], boxes2[:, :2]) - rb = torch.max(boxes1[:, 2:], boxes2[:, 2:]) - - wh = (rb - lt).clamp(min=0) # [N,2] - area = wh[:, 0] * wh[:, 1] - - return iou - (area - union) / area - - -def masks_to_boxes(masks): - """Compute the bounding boxes around the provided masks - - The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. 
-
-    Returns an [N, 4] tensor, with the boxes in xyxy format
-    """
-    if masks.numel() == 0:
-        return torch.zeros((0, 4), device=masks.device)
-
-    h, w = masks.shape[-2:]
-
-    y = torch.arange(0, h, dtype=torch.float)
-    x = torch.arange(0, w, dtype=torch.float)
-    y, x = torch.meshgrid(y, x)
-
-    x_mask = masks * x.unsqueeze(0)
-    x_max = x_mask.flatten(1).max(-1)[0]
-    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
-
-    y_mask = masks * y.unsqueeze(0)
-    y_max = y_mask.flatten(1).max(-1)[0]
-    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
-
-    return torch.stack([x_min, y_min, x_max, y_max], 1)
-
-
-if __name__ == "__main__":
-    x = torch.rand(5, 4)
-    y = torch.rand(3, 4)
-    iou, union = box_iou(x, y)
-    import ipdb
-
-    ipdb.set_trace()
diff --git a/spaces/merve/data-leak/source/third_party/weepeople.css b/spaces/merve/data-leak/source/third_party/weepeople.css
deleted file mode 100644
index 33ed7472967ade6cddc630b1a2ad62597c1cd2b2..0000000000000000000000000000000000000000
--- a/spaces/merve/data-leak/source/third_party/weepeople.css
+++ /dev/null
@@ -1,14 +0,0 @@
-/* https://github.com/propublica/weepeople This work is licensed under the Creative Commons Attribution-NonCommercial-NoDerivs 3.0 United States License */
-
-@font-face {
-    font-family: 'WeePeople';
-    src: url(data:application/font-woff2;charset=utf-8;base64,
[… base64-encoded WeePeople webfont data (woff2/woff payloads) elided …]
nX4Qel8SuV29Ka3BVqoVShG7xLB12Ouow3OlgT8CalrI54O7ZsrOGu1LPV9/lM+1w7i7NwJvD61Nod2SaY1kYfrapL7LQJ9jt/ora5x5FGbMGXVVtrGx0uU9/7WW0Kk+PDhtdEqXedznhZNK4nLg12i0vKy65zb7UpmtxZ+6QsVBvsad1+5k07upW1gyv92m8W1bu3xduj+bHHpYi5ttjs+vnFZxtnYRE4yKa19bEKNjj81PZ1b+rcWo+zm2lbhrzNBlu27mQWzkO2s0U0tYlj7Zcme6GHJlPV2fDluH1xqmOMtXoRFld+Y47rqGPm/SwP1RG3jKuPPcyOZittW4WLiNuKY8E9wn0uVLMpscCO6tJXuPgGt9Z4ZVcWe89MdO1z9Y0wxMK1M9PZplC+afMb9b5obmwZM5XhrHBuuc1wTTKnF2pjTYM/OsdVs1h6R7hUefDW9AY3AguWVxD3pML60q/XJ1p/K9riyPGq42LFoJ3hBS94n3Ff7aiXPix1qU9r8yu1Gzemxw5yX6jofe5qG3HTtZ+pZxfxFB/2GZajN6b6fGiwHVfXWNG4BVijuYkhC3uzLjehLMo8qHBevQNUPlHqG1/8T+bPuZ9Xv4EftjDxxQAk2Y4w8cU1DF9g6gLASbs8QOcohinfwN5PVA3bDx6WT5i4uZ7fzIFHhMH7G0HSByDJ7lYsnQj5cLu4BnYBiPAaAOqdkq+32GXa6DAnyACi+WF8maedETcVvxcYIZp77koDHW4AwMC66zvC113a8P07fXslCJdQdnebsO7hfgS4ELNk2+IECDeLOQF2zwO93d8R3O/SQb5XhCiAmezjVjD2DrslLONE74iq+Ag+wb/Y5e2JlhPAeUy7wfUZPiieKU4Svx2SB8Imrh9uAM83hOIJwN/gOg43PNQdruc1z4cX8547lgMjQO/ly5W+uFIXdDd0EPQiuK47gLgcpf69u2WOpQ+UAXp4opHvVO/dcejyPFNLGH7msY6N87iWnoAUfY5lqWFMAC6sWAAS/sWSdgpYplr+owpLlCJE4GX5hfdERwXowJKs8ElBEGyGmFVGPYd1zIuhaFSYY5nazJYKMBOJDtbDNINJ6KaN17B/Y1xYcCt49cSZWXZ2ii1rAD2sx2msdQ8Y9wBwIK8yRN4c7w34AijBd7AOfrx2eW5dFQGvdBSO8GcAOA7YgY0DErXJcPhKy9lr/vE8Hs/DwZkqswmm82Zmy1LZjy28jIFFAUdwGiqGnfv5ZfUWJ6Fmed7EOFZKV7pSpe5MqPsuvnXz1csqX56cxOMcCNAAeOGcXO4WT7dNLHhx8Wlcvd4qwEmlWlfB6Da4xnUIcsi8kLXCEecz27giOhyyacJyb7Mu25/OF4vXxZdsP5tt9tH0MbwtXgUXa226aNvKLmI4y5s5jrbGavAxK+G87Mro9TqGUdkr1x+HJuJy6tLOAIldr02Pj2Mvvsx/PNtsTr663pZO10tXPSsXY+t1UPBzIarRl7rAgdPZWeN9yHTehys198dl6cxKtzt8LGt+RxzdcVFmXh2ZprQ/4+x7A1fIm4e14GwXrMpXz16cVcWm64PTMbQ8T1xmU2GNajoPeGGHq6RyM/evjf9y+BJvUo1b62Oh88wR6JUNurWncbysuriyema8s6MzGdbUGk5DxVLRJYfzvuwFA3/HF3/efO5+Wf0ifrjdJcZ1TwuFEZH6EZMCeMSCBj3Cogkg+93FlqZNeiNYBKoyJFIksDUkfBEArDXsH+8ZyQflzcQ0wbADCglfNAIL+AR+Jzzo8PM8/XsvHFMYoPyT0IloQE7I74XrvVcfgJNeaK/HwY7puHBEI48C4IKfZBuPf67c3S7RQVAxoba4DAKtaWdkkFcglbcCr2SN91f6jbq9Jp3jkcj+06ZUIsvAIVyai90tibPsS6gnNi4AS8QCmumfsuRAtGL44M65Jjy9tCRxeD2z1nFRAcCsd7FvVkpHOGqVw9ZhoAF+DhhCUHLJfMlVYICWTAbfRcACOSK+BWwUoGcqUyRhWFawrJCZE37A3I7zcw1md24rbBJ7B+5hJQEYKiJUARJgbeuADNhHjHC8LgpA4Ni+r33RfVyV+NjYAzUHw3dpIT5W/vInHBMoIM6GnIqsEYgayRdbvo2OP/fhhVYr7FoHWO/a93q0qi2UOzUleB7eFkg6SD0UbUBzU+7ndIULhe0aksTKWWFq5dMWVAiQ2WgCS1YWMC/VAXmBJrk/rto32TMbKxvmMRxZkFOvswieA/MFOtmRjMzvgTsLb4506GyXt7bwunLNxXh7VNnZiB3lZ+X52ek6bG2ZW1CY3AXQoU2sDTATjM0ORRjDZTgxfoFtESMqvZrXX7XwCDYa63M7DufwSGWJT/Y+r5vmo3f1wmQlyUsGUlG2+VKPAVf1JIDFgM/k3Ar8WUlGbOhocDML2y3deIY7i58ysOnrqvyt521+utJz9QxvxkJp78vlsqejK4HgQIRSZX01m1eAr9GoqnRd6w34uOa1JMCTWxd6iDM7+g3sF47GC2968utf/OfmN/kn6t/FDzCGhTh6+nUEQnNEONf314yFDjGSmOIo7Gme6Iu/SGETTJ2AEIgLggkS9yw2mrxkSG+lUZHGiNG/EzoiRgRCIETlzYGbjdwLg0s9TrGtYEyiTtwyY0Na9V0y9tuEJAxiBT2EhjDEvSXruCKhgNlP1ALMbf8g/Ao/7ME89oJ+Wxzexe6FoslzLxcCBFOoSh72QSU8wCvTJTnFJbsmSSNxeqOE0wiI6EOcig/tr3l03Bq418V2v9sipsW1eKMfhJsBgHmA97IFHOX+4Vo+rZ/MhpPVrFy1WAxkxQj2Clt5+Fx7Fo6ty4kU4OhYE7jLwAZbFhuszYK3XYNa2BaLKpI14fYTHrCJatFvc7glXcKbeBWTWYNnZKpHkIY1F+FhyI4YT9Jx1zlCQYBQl/lanz+vadmAuh6kggxFgU94ghaDMjg60H+VAtNcS0jaBi/hHbZtIhlA3vjIMA3hmRcMIs60M4aHJFE82LL1p+c25qV70c9caZ+2TYFQkCwCpwnXB4aXKYSava4Q9cV86EG9Qmm5uVLQAeAhZy1IR/dMwoV9RPe3ZyXIoytftC/PEXrPeAFmGY5Gt7rLi/tg1+3HatPcLytT2Sqz4EOnXh/b1VksqrmZfevt27BQ32x3mV8/+/FhBEYSCnXIs8o+O2tjGd80s5Pq6OHLxZHu7UUMTQPwgmeoyhf6pPp0jNWzt31x3GbmyLarvDgZjF6u/Los/QBvgfBosGbwy8XRmc+LUJp8zcBsGRGf2s5mleuq/rQAw8jOsvwownLqmT8+yfeZfl0UJRCzqbplMLVd48RiHqzn3a4Z1zleFJX+8TatLCwxRPi8d1kbliuT1VpF4PoQjyJvFL1V0IyjEYp1g13nYZzpsYVH6RhjYzvgrAjGgdo2lsJ5NTHKIjBFSMqrj8157syTd8aEP3/3i//B9O7Pql/GDymJdH8zh2NnDmvKXpFXNGq8SdB0oq7lK3Mr4wJUZJ6YCP8ZhYXME3iRMgj1IIMBIIGyHHIyQlvms
[… remainder of base64-encoded WOFF font payload elided …]) format('woff'); - font-weight: normal; - font-style: normal; - -} - -.weepeople { - font-family: "WeePeople"; -} \ No newline at end of file diff --git a/spaces/mfrashad/ClothingGAN/netdissect/upsegmodel/prroi_pool/functional.py b/spaces/mfrashad/ClothingGAN/netdissect/upsegmodel/prroi_pool/functional.py deleted file mode 100644 index 7dc7a8c282e846bd633c4fdc4190c4dca3da5a6f..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/ClothingGAN/netdissect/upsegmodel/prroi_pool/functional.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# File : functional.py -# Author : Jiayuan Mao, Tete Xiao -# Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com -# Date : 07/13/2018 -# -# This file is part of PreciseRoIPooling. -# Distributed under terms of the MIT license. -# Copyright (c) 2017 Megvii Technology Limited. - -import torch -import torch.autograd as ag - -try: - from os.path import join as pjoin, dirname - from torch.utils.cpp_extension import load as load_extension - root_dir = pjoin(dirname(__file__), 'src') - _prroi_pooling = load_extension( - '_prroi_pooling', - [pjoin(root_dir, 'prroi_pooling_gpu.c'), pjoin(root_dir, 'prroi_pooling_gpu_impl.cu')], - verbose=False - ) -except ImportError: - raise ImportError('Can not compile Precise RoI Pooling library.') - -__all__ = ['prroi_pool2d'] - - -class PrRoIPool2DFunction(ag.Function): - @staticmethod - def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale): - assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \ - 'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type()) - - pooled_height = int(pooled_height) - pooled_width = int(pooled_width) - spatial_scale = float(spatial_scale) - - features = features.contiguous() - rois = rois.contiguous() - params = (pooled_height, pooled_width, spatial_scale) - - if features.is_cuda: - output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params) - ctx.params = params - # everything here is contiguous.
- ctx.save_for_backward(features, rois, output) - else: - raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.') - - return output - - @staticmethod - def backward(ctx, grad_output): - features, rois, output = ctx.saved_tensors - grad_input = grad_coor = None - - if features.requires_grad: - grad_output = grad_output.contiguous() - grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params) - if rois.requires_grad: - grad_output = grad_output.contiguous() - grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params) - - return grad_input, grad_coor, None, None, None - - -prroi_pool2d = PrRoIPool2DFunction.apply - diff --git a/spaces/mikebars/huggingface/assets/index-2544f694.css b/spaces/mikebars/huggingface/assets/index-2544f694.css deleted file mode 100644 index add7e384adb8cdd0bf20acab0be61bbee721f79a..0000000000000000000000000000000000000000 --- a/spaces/mikebars/huggingface/assets/index-2544f694.css +++ /dev/null @@ -1 +0,0 @@ -*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";font-feature-settings:normal}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;font-weight:inherit;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}*,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: 
;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.container{width:100%}@media (min-width: 640px){.container{max-width:640px}}@media (min-width: 768px){.container{max-width:768px}}@media (min-width: 1024px){.container{max-width:1024px}}@media (min-width: 1280px){.container{max-width:1280px}}@media (min-width: 1536px){.container{max-width:1536px}}.block{display:block}.flex{display:flex}.table{display:table}.hidden{display:none}.h-full{height:100%}.min-h-screen{min-height:100vh}.w-2\/3{width:66.666667%}.w-full{width:100%}.cursor-not-allowed{cursor:not-allowed}.cursor-pointer{cursor:pointer}.cursor-wait{cursor:wait}.select-text{-webkit-user-select:text;-moz-user-select:text;user-select:text}.flex-col{flex-direction:column}.items-center{align-items:center}.justify-center{justify-content:center}.space-y-12>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(3rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(3rem * var(--tw-space-y-reverse))}.overflow-auto{overflow:auto}.whitespace-pre-wrap{white-space:pre-wrap}.border-4{border-width:4px}.border-yellow-200{--tw-border-opacity: 1;border-color:rgb(254 240 138 / var(--tw-border-opacity))}.bg-yellow-200{--tw-bg-opacity: 1;background-color:rgb(254 240 138 / var(--tw-bg-opacity))}.bg-yellow-500{--tw-bg-opacity: 1;background-color:rgb(234 179 8 / var(--tw-bg-opacity))}.p-6{padding:1.5rem}.py-24{padding-top:6rem;padding-bottom:6rem}.py-6{padding-top:1.5rem;padding-bottom:1.5rem}.text-center{text-align:center}.text-6xl{font-size:3.75rem;line-height:1}.text-xl{font-size:1.25rem;line-height:1.75rem}.opacity-50{opacity:.5}.filter{filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) 
var(--tw-drop-shadow)}*,*:before,*:after{box-sizing:inherit;-webkit-user-select:inherit;-moz-user-select:inherit;user-select:inherit}html,body,#root{box-sizing:border-box;height:100%;min-height:100vh;width:100%;min-width:100vw;margin:0;padding:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}input::-webkit-file-upload-button{display:none}@media (min-width: 1024px){.lg\:w-1\/3{width:33.333333%}} diff --git a/spaces/mikkoar/marco/src/components/chat-suggestions.tsx b/spaces/mikkoar/marco/src/components/chat-suggestions.tsx deleted file mode 100644 index 00c2fee295c9e010946046eb71705a5e131f7a5a..0000000000000000000000000000000000000000 --- a/spaces/mikkoar/marco/src/components/chat-suggestions.tsx +++ /dev/null @@ -1,45 +0,0 @@ -import React, { useMemo } from 'react' -import Image from 'next/image' -import HelpIcon from '@/assets/images/help.svg' -import { SuggestedResponse } from '@/lib/bots/bing/types' -import { useBing } from '@/lib/hooks/use-bing' -import { atom, useAtom } from 'jotai' - -type Suggestions = SuggestedResponse[] -const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资迅', '必应如何使用 AI?'].map((text) => ({ text })) -const suggestionsAtom = atom<Suggestions>([]) - -type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick<ReturnType<typeof useBing>, 'setInput'> & { suggestions?: Suggestions } - -export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) { - const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom) - const toggleSuggestions = (() => { - if (currentSuggestions === helpSuggestions) { - setSuggestions(suggestions) - } else { - setSuggestions(helpSuggestions) - } - }) - - useMemo(() => { - setSuggestions(suggestions) - window.scrollBy(0, 2000) - }, [suggestions.length]) - - return currentSuggestions?.length ? ( - <div className="suggestions"> {/* the JSX tags in this block were stripped by HTML extraction; markup reconstructed approximately */} - <Image alt="help" src={HelpIcon} onClick={toggleSuggestions} /> - { - currentSuggestions.map(suggestion => ( - <button key={suggestion.text} type="button" onClick={() => setInput(suggestion.text)}>{suggestion.text}</button> - )) - } - </div>
            - ) : null -} diff --git a/spaces/mms-meta/MMS/vits/text/__init__.py b/spaces/mms-meta/MMS/vits/text/__init__.py deleted file mode 100644 index 4ac41f9025755d8ffd74068af14c6cfc8e5a4173..0000000000000000000000000000000000000000 --- a/spaces/mms-meta/MMS/vits/text/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/mnauf/detect-bees/utils/segment/dataloaders.py b/spaces/mnauf/detect-bees/utils/segment/dataloaders.py deleted file mode 100644 index a63d6ec013fd7fc9223e203fa5934f0f3d99312c..0000000000000000000000000000000000000000 --- a/spaces/mnauf/detect-bees/utils/segment/dataloaders.py +++ /dev/null @@ -1,330 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Dataloaders -""" - -import os -import random - -import cv2 -import numpy as np -import torch -from torch.utils.data import DataLoader, distributed - -from ..augmentations import augment_hsv, copy_paste, letterbox -from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker -from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn -from ..torch_utils import torch_distributed_zero_first -from .augmentations import mixup, random_perspective - -RANK = int(os.getenv('RANK', -1)) - - -def create_dataloader(path, - imgsz, - batch_size, - stride, - single_cls=False, - hyp=None, - augment=False, - cache=False, - pad=0.0, - rect=False, - rank=-1, - workers=8, - image_weights=False, - quad=False, - prefix='', - shuffle=False, - mask_downsample_ratio=1, - overlap_mask=False): - if rect and shuffle: - LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') - shuffle = False - with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP - dataset = LoadImagesAndLabelsAndMasks( - path, - imgsz, - batch_size, - augment=augment, # augmentation - hyp=hyp, # hyperparameters - rect=rect, # rectangular batches - cache_images=cache, - single_cls=single_cls, - stride=int(stride), - pad=pad, - image_weights=image_weights, - 
prefix=prefix, - downsample_ratio=mask_downsample_ratio, - overlap=overlap_mask) - - batch_size = min(batch_size, len(dataset)) - nd = torch.cuda.device_count() # number of CUDA devices - nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) - loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates - generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) - return loader( - dataset, - batch_size=batch_size, - shuffle=shuffle and sampler is None, - num_workers=nw, - sampler=sampler, - pin_memory=True, - collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, - worker_init_fn=seed_worker, - generator=generator, - ), dataset - - -class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing - - def __init__( - self, - path, - img_size=640, - batch_size=16, - augment=False, - hyp=None, - rect=False, - image_weights=False, - cache_images=False, - single_cls=False, - stride=32, - pad=0, - prefix="", - downsample_ratio=1, - overlap=False, - ): - super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, - stride, pad, prefix) - self.downsample_ratio = downsample_ratio - self.overlap = overlap - - def __getitem__(self, index): - index = self.indices[index] # linear, shuffled, or image_weights - - hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] - masks = [] - if mosaic: - # Load mosaic - img, labels, segments = self.load_mosaic(index) - shapes = None - - # MixUp augmentation - if random.random() < hyp["mixup"]: - img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) - - else: - # Load image - img, (h0, w0), (h, w) = self.load_image(index) - - # Letterbox - shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape - img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) - shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling - - labels = self.labels[index].copy() - # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy - segments = self.segments[index].copy() - if len(segments): - for i_s in range(len(segments)): - segments[i_s] = xyn2xy( - segments[i_s], - ratio[0] * w, - ratio[1] * h, - padw=pad[0], - padh=pad[1], - ) - if labels.size: # normalized xywh to pixel xyxy format - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - - if self.augment: - img, labels, segments = random_perspective(img, - labels, - segments=segments, - degrees=hyp["degrees"], - translate=hyp["translate"], - scale=hyp["scale"], - shear=hyp["shear"], - perspective=hyp["perspective"]) - - nl = len(labels) # number of labels - if nl: - labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) - if self.overlap: - masks, sorted_idx = polygons2masks_overlap(img.shape[:2], - segments, - downsample_ratio=self.downsample_ratio) - masks = masks[None] # (640, 640) -> (1, 640, 640) - labels = labels[sorted_idx] - else: - masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) - - masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // - self.downsample_ratio, img.shape[1] // 
- self.downsample_ratio)) - # TODO: albumentations support - if self.augment: - # Albumentations - # there are some augmentation that won't change boxes and masks, - # so just be it for now. - img, labels = self.albumentations(img, labels) - nl = len(labels) # update after albumentations - - # HSV color-space - augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) - - # Flip up-down - if random.random() < hyp["flipud"]: - img = np.flipud(img) - if nl: - labels[:, 2] = 1 - labels[:, 2] - masks = torch.flip(masks, dims=[1]) - - # Flip left-right - if random.random() < hyp["fliplr"]: - img = np.fliplr(img) - if nl: - labels[:, 1] = 1 - labels[:, 1] - masks = torch.flip(masks, dims=[2]) - - # Cutouts # labels = cutout(img, labels, p=0.5) - - labels_out = torch.zeros((nl, 6)) - if nl: - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img = np.ascontiguousarray(img) - - return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks) - - def load_mosaic(self, index): - # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic - labels4, segments4 = [], [] - s = self.img_size - yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y - - # 3 additional image indices - indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = self.load_image(index) - - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - labels, segments = self.labels[index].copy(), self.segments[index].copy() - - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padw, padh) for x in segments] - labels4.append(labels) - segments4.extend(segments) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:], *segments4): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) - img4, labels4, segments4 = random_perspective(img4, - labels4, - segments4, - degrees=self.hyp["degrees"], - translate=self.hyp["translate"], - scale=self.hyp["scale"], - shear=self.hyp["shear"], - perspective=self.hyp["perspective"], - border=self.mosaic_border) # border to remove - return img4, labels4, segments4 - - @staticmethod - def collate_fn(batch): - img, label, path, shapes, masks = zip(*batch) # transposed - 
batched_masks = torch.cat(masks, 0) - for i, l in enumerate(label): - l[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks - - -def polygon2mask(img_size, polygons, color=1, downsample_ratio=1): - """ - Args: - img_size (tuple): The image size. - polygons (np.ndarray): [N, M], N is the number of polygons, - M is the number of points(Be divided by 2). - """ - mask = np.zeros(img_size, dtype=np.uint8) - polygons = np.asarray(polygons) - polygons = polygons.astype(np.int32) - shape = polygons.shape - polygons = polygons.reshape(shape[0], -1, 2) - cv2.fillPoly(mask, polygons, color=color) - nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio) - # NOTE: fillPoly firstly then resize is trying the keep the same way - # of loss calculation when mask-ratio=1. - mask = cv2.resize(mask, (nw, nh)) - return mask - - -def polygons2masks(img_size, polygons, color, downsample_ratio=1): - """ - Args: - img_size (tuple): The image size. - polygons (list[np.ndarray]): each polygon is [N, M], - N is the number of polygons, - M is the number of points(Be divided by 2). - """ - masks = [] - for si in range(len(polygons)): - mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio) - masks.append(mask) - return np.array(masks) - - -def polygons2masks_overlap(img_size, segments, downsample_ratio=1): - """Return a (640, 640) overlap mask.""" - masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), - dtype=np.int32 if len(segments) > 255 else np.uint8) - areas = [] - ms = [] - for si in range(len(segments)): - mask = polygon2mask( - img_size, - [segments[si].reshape(-1)], - downsample_ratio=downsample_ratio, - color=1, - ) - ms.append(mask) - areas.append(mask.sum()) - areas = np.asarray(areas) - index = np.argsort(-areas) - ms = np.array(ms)[index] - for i in range(len(segments)): - mask = ms[i] * (i + 1) - masks = masks + mask - masks = np.clip(masks, a_min=0, a_max=i + 1) - return masks, index diff --git a/spaces/mshkdm/VToonify/vtoonify/model/raft/core/datasets.py b/spaces/mshkdm/VToonify/vtoonify/model/raft/core/datasets.py deleted file mode 100644 index 9991f15f4c3861c19d1a4b8766d49f83af11db70..0000000000000000000000000000000000000000 --- a/spaces/mshkdm/VToonify/vtoonify/model/raft/core/datasets.py +++ /dev/null @@ -1,235 +0,0 @@ -# Data loading based on https://github.com/NVIDIA/flownet2-pytorch - -import numpy as np -import torch -import torch.utils.data as data -import torch.nn.functional as F - -import os -import math -import random -from glob import glob -import os.path as osp - -from model.raft.core.utils import frame_utils -from model.raft.core.utils.augmentor import FlowAugmentor, SparseFlowAugmentor - - -class FlowDataset(data.Dataset): - def __init__(self, aug_params=None, sparse=False): - self.augmentor = None - self.sparse = sparse - if aug_params is not None: - if sparse: - self.augmentor = SparseFlowAugmentor(**aug_params) - else: - self.augmentor = FlowAugmentor(**aug_params) - - self.is_test = False - self.init_seed = False - self.flow_list = [] - self.image_list = [] - self.extra_info = [] - - def __getitem__(self, index): - - if self.is_test: - img1 = frame_utils.read_gen(self.image_list[index][0]) - img2 = frame_utils.read_gen(self.image_list[index][1]) - img1 = np.array(img1).astype(np.uint8)[..., :3] - img2 = np.array(img2).astype(np.uint8)[..., :3] - img1 = torch.from_numpy(img1).permute(2, 0, 1).float() - img2 = 
torch.from_numpy(img2).permute(2, 0, 1).float() - return img1, img2, self.extra_info[index] - - if not self.init_seed: - worker_info = torch.utils.data.get_worker_info() - if worker_info is not None: - torch.manual_seed(worker_info.id) - np.random.seed(worker_info.id) - random.seed(worker_info.id) - self.init_seed = True - - index = index % len(self.image_list) - valid = None - if self.sparse: - flow, valid = frame_utils.readFlowKITTI(self.flow_list[index]) - else: - flow = frame_utils.read_gen(self.flow_list[index]) - - img1 = frame_utils.read_gen(self.image_list[index][0]) - img2 = frame_utils.read_gen(self.image_list[index][1]) - - flow = np.array(flow).astype(np.float32) - img1 = np.array(img1).astype(np.uint8) - img2 = np.array(img2).astype(np.uint8) - - # grayscale images - if len(img1.shape) == 2: - img1 = np.tile(img1[...,None], (1, 1, 3)) - img2 = np.tile(img2[...,None], (1, 1, 3)) - else: - img1 = img1[..., :3] - img2 = img2[..., :3] - - if self.augmentor is not None: - if self.sparse: - img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid) - else: - img1, img2, flow = self.augmentor(img1, img2, flow) - - img1 = torch.from_numpy(img1).permute(2, 0, 1).float() - img2 = torch.from_numpy(img2).permute(2, 0, 1).float() - flow = torch.from_numpy(flow).permute(2, 0, 1).float() - - if valid is not None: - valid = torch.from_numpy(valid) - else: - valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000) - - return img1, img2, flow, valid.float() - - - def __rmul__(self, v): - self.flow_list = v * self.flow_list - self.image_list = v * self.image_list - return self - - def __len__(self): - return len(self.image_list) - - -class MpiSintel(FlowDataset): - def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'): - super(MpiSintel, self).__init__(aug_params) - flow_root = osp.join(root, split, 'flow') - image_root = osp.join(root, split, dstype) - - if split == 'test': - self.is_test = True - - for scene in os.listdir(image_root): - image_list = sorted(glob(osp.join(image_root, scene, '*.png'))) - for i in range(len(image_list)-1): - self.image_list += [ [image_list[i], image_list[i+1]] ] - self.extra_info += [ (scene, i) ] # scene and frame_id - - if split != 'test': - self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo'))) - - -class FlyingChairs(FlowDataset): - def __init__(self, aug_params=None, split='train', root='datasets/FlyingChairs_release/data'): - super(FlyingChairs, self).__init__(aug_params) - - images = sorted(glob(osp.join(root, '*.ppm'))) - flows = sorted(glob(osp.join(root, '*.flo'))) - assert (len(images)//2 == len(flows)) - - split_list = np.loadtxt('chairs_split.txt', dtype=np.int32) - for i in range(len(flows)): - xid = split_list[i] - if (split=='training' and xid==1) or (split=='validation' and xid==2): - self.flow_list += [ flows[i] ] - self.image_list += [ [images[2*i], images[2*i+1]] ] - - -class FlyingThings3D(FlowDataset): - def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'): - super(FlyingThings3D, self).__init__(aug_params) - - for cam in ['left']: - for direction in ['into_future', 'into_past']: - image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*'))) - image_dirs = sorted([osp.join(f, cam) for f in image_dirs]) - - flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*'))) - flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs]) - - for idir, fdir in zip(image_dirs, flow_dirs): - images = sorted(glob(osp.join(idir, '*.png')) ) 
- flows = sorted(glob(osp.join(fdir, '*.pfm')) ) - for i in range(len(flows)-1): - if direction == 'into_future': - self.image_list += [ [images[i], images[i+1]] ] - self.flow_list += [ flows[i] ] - elif direction == 'into_past': - self.image_list += [ [images[i+1], images[i]] ] - self.flow_list += [ flows[i+1] ] - - -class KITTI(FlowDataset): - def __init__(self, aug_params=None, split='training', root='datasets/KITTI'): - super(KITTI, self).__init__(aug_params, sparse=True) - if split == 'testing': - self.is_test = True - - root = osp.join(root, split) - images1 = sorted(glob(osp.join(root, 'image_2/*_10.png'))) - images2 = sorted(glob(osp.join(root, 'image_2/*_11.png'))) - - for img1, img2 in zip(images1, images2): - frame_id = img1.split('/')[-1] - self.extra_info += [ [frame_id] ] - self.image_list += [ [img1, img2] ] - - if split == 'training': - self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png'))) - - -class HD1K(FlowDataset): - def __init__(self, aug_params=None, root='datasets/HD1k'): - super(HD1K, self).__init__(aug_params, sparse=True) - - seq_ix = 0 - while 1: - flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix))) - images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix))) - - if len(flows) == 0: - break - - for i in range(len(flows)-1): - self.flow_list += [flows[i]] - self.image_list += [ [images[i], images[i+1]] ] - - seq_ix += 1 - - -def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'): - """ Create the data loader for the corresponding trainign set """ - - if args.stage == 'chairs': - aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True} - train_dataset = FlyingChairs(aug_params, split='training') - - elif args.stage == 'things': - aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True} - clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass') - final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass') - train_dataset = clean_dataset + final_dataset - - elif args.stage == 'sintel': - aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True} - things = FlyingThings3D(aug_params, dstype='frames_cleanpass') - sintel_clean = MpiSintel(aug_params, split='training', dstype='clean') - sintel_final = MpiSintel(aug_params, split='training', dstype='final') - - if TRAIN_DS == 'C+T+K+S+H': - kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True}) - hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True}) - train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things - - elif TRAIN_DS == 'C+T+K/S': - train_dataset = 100*sintel_clean + 100*sintel_final + things - - elif args.stage == 'kitti': - aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False} - train_dataset = KITTI(aug_params, split='training') - - train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, - pin_memory=False, shuffle=True, num_workers=4, drop_last=True) - - print('Training with %d image pairs' % len(train_dataset)) - return train_loader - diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/decode.sh b/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/decode.sh deleted file mode 100644 index 
811cb63c88bb7cdd03b0a250ef2db32b5eaa50df..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/decode.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -set -u - -val_sets="dev_other" -graph_name=graph -decode_suffix="" -decode_script="steps/decode_fmllr.sh" -decode_args="" -nj=60 - -. ./cmd.sh -. ./path.sh -. parse_options.sh - -set -x -exp_dir=$1 -data_root=$2 -lang_test=$3 - -graph=$exp_dir/$graph_name - -if [ ! -d $graph ]; then - utils/mkgraph.sh $lang_test $exp_dir $graph -fi - -for part in $val_sets; do - dec_dir=$exp_dir/decode${decode_suffix}_${part} - if [ ! -d $dec_dir ]; then - echo "decoding $part for $exp_dir" - $decode_script --nj $nj --cmd "$decode_cmd" $decode_args \ - $graph $data_root/$part $dec_dir & - else - echo "$dec_dir exists. skip" - fi -done - -wait diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/audio/__init__.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/data/audio/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/model_parallel/__init__.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/model_parallel/__init__.py deleted file mode 100644 index 69f21684872f72ae8ee26d9ff7d2d2b6e6d526c3..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/model_parallel/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . import criterions, models, modules # noqa diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/optim/fairseq_optimizer.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/optim/fairseq_optimizer.py deleted file mode 100644 index 7e5411753a2ba94f3a7a68316131530b8b17d22a..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/optim/fairseq_optimizer.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from fairseq import utils -from fairseq.dataclass.utils import gen_parser_from_dataclass - - -class FairseqOptimizer(object): - def __init__(self, cfg): - super().__init__() - self.cfg = cfg - - @classmethod - def add_args(cls, parser): - """Add optimizer-specific arguments to the parser.""" - dc = getattr(cls, "__dataclass", None) - if dc is not None: - gen_parser_from_dataclass(parser, dc()) - - @property - def optimizer(self): - """Return a torch.optim.optimizer.Optimizer instance.""" - if not hasattr(self, "_optimizer"): - raise NotImplementedError - if not isinstance(self._optimizer, torch.optim.Optimizer): - raise ValueError("_optimizer must be an instance of torch.optim.Optimizer") - return self._optimizer - - @optimizer.setter - def optimizer(self, optimizer): - """Reset optimizer instance.""" - if not hasattr(self, "_optimizer"): - raise NotImplementedError - if not isinstance(self._optimizer, torch.optim.Optimizer): - raise ValueError("_optimizer must be an instance of torch.optim.Optimizer") - self._optimizer = optimizer - - @property - def optimizer_config(self): - """ - Return a kwarg dictionary that will be used to override optimizer - args stored in checkpoints. 
This allows us to load a checkpoint and - resume training using a different set of optimizer args, e.g., with a - different learning rate. - """ - raise NotImplementedError - - @property - def params(self): - """Return an iterable of the parameters held by the optimizer.""" - for param_group in self.param_groups: - for p in param_group["params"]: - yield p - - @property - def param_groups(self): - return self.optimizer.param_groups - - def __getstate__(self): - return self._optimizer.__getstate__() - - def get_lr(self): - """Return the current learning rate.""" - return self.param_groups[0]["lr"] - - def set_lr(self, lr): - """Set the learning rate.""" - for param_group in self.param_groups: - param_group["lr"] = lr - - def state_dict(self): - """Return the optimizer's state dict.""" - return self.optimizer.state_dict() - - def load_state_dict(self, state_dict, optimizer_overrides=None): - """Load an optimizer state dict. - - In general we should prefer the configuration of the existing optimizer - instance (e.g., learning rate) over that found in the state_dict. This - allows us to resume training from a checkpoint using a new set of - optimizer args. - """ - self.optimizer.load_state_dict(state_dict) - - if optimizer_overrides is not None and len(optimizer_overrides) > 0: - # override learning rate, momentum, etc. with latest values - for group in self.param_groups: - group.update(optimizer_overrides) - - def backward(self, loss): - """Computes the sum of gradients of the given tensor w.r.t. graph leaves.""" - loss.backward() - - def all_reduce_grads(self, module): - """Manually all-reduce gradients (if required).""" - if hasattr(module, "all_reduce_grads"): - module.all_reduce_grads() - - def multiply_grads(self, c): - """Multiplies grads by a constant *c*.""" - for p in self.params: - if p.grad is not None: - if torch.is_tensor(c): - c = c.to(p.grad.device) - p.grad.data.mul_(c) - - def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): - """Clips gradient norm.""" - return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn) - - def step(self, closure=None, scale=1.0, groups=None): - """Performs a single optimization step.""" - if self.supports_step_with_scale: - if self.supports_groups: - self.optimizer.step(closure, scale=scale, groups=groups) - else: - self.optimizer.step(closure, scale=scale) - else: - if scale != 1.0: - self.multiply_grads(1.0 / scale) - if self.supports_groups: - self.optimizer.step(closure, groups=groups) - else: - self.optimizer.step(closure) - - def zero_grad(self): - """Clears the gradients of all optimized parameters.""" - for p in self.params: - p.grad = None - self.optimizer.zero_grad() - - @property - def supports_memory_efficient_fp16(self): - if hasattr(self.optimizer, "supports_memory_efficient_fp16"): - return self.optimizer.supports_memory_efficient_fp16 - return False - - @property - def supports_step_with_scale(self): - if hasattr(self.optimizer, "supports_step_with_scale"): - return self.optimizer.supports_step_with_scale - return False - - @property - def supports_groups(self): - if hasattr(self.optimizer, "supports_groups"): - return self.optimizer.supports_groups - return False - - @property - def supports_flat_params(self): - """ - Whether the optimizer supports collapsing of the model - parameters/gradients into a single contiguous Tensor. 
- """ - if hasattr(self.optimizer, "supports_flat_params"): - return self.optimizer.supports_flat_params - return False - - def average_params(self): - pass - - def broadcast_global_state_dict(self, state_dict): - """ - Broadcasts a global state dict to all ranks. - Useful for optimizers that shard state between ranks. - """ - if hasattr(self.optimizer, "broadcast_global_state_dict"): - return self.optimizer.broadcast_global_state_dict(state_dict) - else: - return state_dict - - -class LegacyFairseqOptimizer(FairseqOptimizer): - def __init__(self, args): - self.args = args diff --git a/spaces/mtulow/geospatial_deep_learning_app/app.py b/spaces/mtulow/geospatial_deep_learning_app/app.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/muheiroiro/youtube_comments_chat/create_chatbot.py b/spaces/muheiroiro/youtube_comments_chat/create_chatbot.py deleted file mode 100644 index eb441a06c4c6378c5561e23b095df5dbaef1e485..0000000000000000000000000000000000000000 --- a/spaces/muheiroiro/youtube_comments_chat/create_chatbot.py +++ /dev/null @@ -1,43 +0,0 @@ -# vectorstoreからチャットボットの作成 - -from langchain.chains.llm import LLMChain -from langchain.callbacks.base import BaseCallbackManager -from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler -from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT -from langchain.chains.question_answering import load_qa_chain -from langchain.vectorstores.base import VectorStore -from langchain.chains import ChatVectorDBChain -from langchain.llms import OpenAIChat -from load_documents import ingest_docs - -# 標準出力にストリーミングで回答を出力するためのコールバック関数 -manager = BaseCallbackManager([StreamingStdOutCallbackHandler()]) - -def chatbot(key, question, gpt_model): - streaming_llm = OpenAIChat( - model_name=gpt_model, - openai_api_key= key, - streaming=True, - callback_manager=manager, - verbose=True, - temperature=0 - ) - question_gen_llm = OpenAIChat( - openai_api_key= key, - temperature=0, - verbose=True, - callback_manager=manager - ) - # チャット履歴と新しい質問を取り込み、独立した質問を生成するプロンプト - question_generator = LLMChain(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT) - # ドキュメントとスタンドアローンの質問を取り込み、質問に答えるためのプロンプトを渡す - doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT) - # 埋め込みモデルからプロンプトを生成する - qa = ChatVectorDBChain( - vectorstore=ingest_docs("youtube_comments", key), - combine_docs_chain=doc_chain, - question_generator=question_generator) - - answer = qa({"question": question, "chat_history": []}) - - return answer["answer"] \ No newline at end of file diff --git a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/losses/__init__.py b/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/losses/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/myrad01/Inpaint-Anything/utils/__init__.py b/spaces/myrad01/Inpaint-Anything/utils/__init__.py deleted file mode 100644 index 90f60fdd89ad8575faafe45188bd1d968852fc67..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .utils import * \ No newline at end of file diff --git a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/lr_scheduler.py b/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/lr_scheduler.py 
deleted file mode 100644 index cd693c9139fb1bcc5c97def4278c45aa84205e28..0000000000000000000000000000000000000000 --- a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/lr_scheduler.py +++ /dev/null @@ -1,185 +0,0 @@ -from bisect import bisect_right - -from timm.scheduler.cosine_lr import CosineLRScheduler -from timm.scheduler.step_lr import StepLRScheduler -from timm.scheduler.scheduler import Scheduler - -import torch -import torch.distributed as dist - - -def build_scheduler(config, optimizer, n_iter_per_epoch): - num_steps = int(config.TRAIN.EPOCHS * n_iter_per_epoch) - warmup_steps = int(config.TRAIN.WARMUP_EPOCHS * n_iter_per_epoch) - decay_steps = int( - config.TRAIN.LR_SCHEDULER.DECAY_EPOCHS * n_iter_per_epoch) - multi_steps = [ - i * n_iter_per_epoch for i in config.TRAIN.LR_SCHEDULER.MULTISTEPS] - - lr_scheduler = None - if config.TRAIN.LR_SCHEDULER.NAME == 'cosine': - lr_scheduler = CosineLRScheduler( - optimizer, - t_initial=num_steps, - cycle_mul=1., - lr_min=config.TRAIN.MIN_LR, - warmup_lr_init=config.TRAIN.WARMUP_LR, - warmup_t=warmup_steps, - cycle_limit=1, - t_in_epochs=False, - ) - elif config.TRAIN.LR_SCHEDULER.NAME == 'linear': - lr_scheduler = LinearLRScheduler( - optimizer, - t_initial=num_steps, - lr_min_rate=0.01, - warmup_lr_init=config.TRAIN.WARMUP_LR, - warmup_t=warmup_steps, - t_in_epochs=False, - ) - elif config.TRAIN.LR_SCHEDULER.NAME == 'step': - lr_scheduler = StepLRScheduler( - optimizer, - decay_t=decay_steps, - decay_rate=config.TRAIN.LR_SCHEDULER.DECAY_RATE, - warmup_lr_init=config.TRAIN.WARMUP_LR, - warmup_t=warmup_steps, - t_in_epochs=False, - ) - elif config.TRAIN.LR_SCHEDULER.NAME == 'multistep': - lr_scheduler = MultiStepLRScheduler( - optimizer, - milestones=multi_steps, - gamma=config.TRAIN.LR_SCHEDULER.GAMMA, - warmup_lr_init=config.TRAIN.WARMUP_LR, - warmup_t=warmup_steps, - t_in_epochs=False, - ) - - return lr_scheduler - - -class LinearLRScheduler(Scheduler): - def __init__(self, - optimizer: torch.optim.Optimizer, - t_initial: int, - lr_min_rate: float, - warmup_t=0, - warmup_lr_init=0., - t_in_epochs=True, - noise_range_t=None, - noise_pct=0.67, - noise_std=1.0, - noise_seed=42, - initialize=True, - ) -> None: - super().__init__( - optimizer, param_group_field="lr", - noise_range_t=noise_range_t, noise_pct=noise_pct, - noise_std=noise_std, noise_seed=noise_seed, - initialize=initialize) - - self.t_initial = t_initial - self.lr_min_rate = lr_min_rate - self.warmup_t = warmup_t - self.warmup_lr_init = warmup_lr_init - self.t_in_epochs = t_in_epochs - if self.warmup_t: - self.warmup_steps = [(v - warmup_lr_init) / - self.warmup_t for v in self.base_values] - super().update_groups(self.warmup_lr_init) - else: - self.warmup_steps = [1 for _ in self.base_values] - - def _get_lr(self, t): - if t < self.warmup_t: - lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] - else: - t = t - self.warmup_t - total_t = self.t_initial - self.warmup_t - lrs = [v - ((v - v * self.lr_min_rate) * (t / total_t)) - for v in self.base_values] - return lrs - - def get_epoch_values(self, epoch: int): - if self.t_in_epochs: - return self._get_lr(epoch) - else: - return None - - def get_update_values(self, num_updates: int): - if not self.t_in_epochs: - return self._get_lr(num_updates) - else: - return None - - -class MultiStepLRScheduler(Scheduler): - def __init__(self, optimizer: torch.optim.Optimizer, - milestones, gamma=0.1, warmup_t=0, - warmup_lr_init=0, t_in_epochs=True) -> None: - super().__init__(optimizer, 
param_group_field="lr") - - self.milestones = milestones - self.gamma = gamma - self.warmup_t = warmup_t - self.warmup_lr_init = warmup_lr_init - self.t_in_epochs = t_in_epochs - if self.warmup_t: - self.warmup_steps = [(v - warmup_lr_init) / - self.warmup_t for v in self.base_values] - super().update_groups(self.warmup_lr_init) - else: - self.warmup_steps = [1 for _ in self.base_values] - - assert self.warmup_t <= min(self.milestones) - - def _get_lr(self, t): - if t < self.warmup_t: - lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] - else: - lrs = [v * (self.gamma ** bisect_right(self.milestones, t)) - for v in self.base_values] - return lrs - - def get_epoch_values(self, epoch: int): - if self.t_in_epochs: - return self._get_lr(epoch) - else: - return None - - def get_update_values(self, num_updates: int): - if not self.t_in_epochs: - return self._get_lr(num_updates) - else: - return None - - -def setup_scaled_lr(config): - # linear scale the learning rate according to total batch size, - # may not be optimal - - batch_size = config.DATA.BATCH_SIZE - - world_size = dist.get_world_size() - - denom_const = 512.0 - - accumulation_steps = config.TRAIN.ACCUMULATION_STEPS - - linear_scaled_lr = config.TRAIN.BASE_LR * \ - batch_size * world_size / denom_const - - linear_scaled_warmup_lr = config.TRAIN.WARMUP_LR * \ - batch_size * world_size / denom_const - - linear_scaled_min_lr = config.TRAIN.MIN_LR * \ - batch_size * world_size / denom_const - - # gradient accumulation also need to scale the learning rate - if accumulation_steps > 1: - linear_scaled_lr = linear_scaled_lr * accumulation_steps - linear_scaled_warmup_lr = linear_scaled_warmup_lr * accumulation_steps - linear_scaled_min_lr = linear_scaled_min_lr * accumulation_steps - - return linear_scaled_lr, linear_scaled_warmup_lr, linear_scaled_min_lr diff --git a/spaces/nateraw/quickdraw/README.md b/spaces/nateraw/quickdraw/README.md deleted file mode 100644 index 02bd5a05da58c8ba7f81964f66572b5f2f39a09f..0000000000000000000000000000000000000000 --- a/spaces/nateraw/quickdraw/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Quickdraw -emoji: 💻 -colorFrom: blue -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
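Aside: the setup_scaled_lr helper in the deleted pytorch_caney lr_scheduler.py above applies the standard linear learning-rate scaling rule. The sketch below re-derives it with hypothetical config values (the function name and numbers are illustrative, not from the file); with a global batch of 512 the base LR passes through unchanged.

# Sanity check of the linear LR scaling rule in setup_scaled_lr() above.
# All numbers here are hypothetical, used only to illustrate that an
# effective (global) batch size of 512 leaves the base LR unchanged.

def scale_lr(base_lr: float, batch_size: int, world_size: int,
             accumulation_steps: int = 1, denom: float = 512.0) -> float:
    """lr = base_lr * per-device batch * world size / 512, then scaled
    again by gradient-accumulation steps, mirroring setup_scaled_lr()."""
    lr = base_lr * batch_size * world_size / denom
    if accumulation_steps > 1:  # gradient accumulation also scales the LR
        lr *= accumulation_steps
    return lr

# 128 per device on 4 devices -> global batch 512 -> LR unchanged
assert scale_lr(5e-4, 128, 4) == 5e-4
# doubling the device count doubles the LR
assert scale_lr(5e-4, 128, 8) == 1e-3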
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Acronis True Image 2018 Build 11530 Universal Restore BootCD Downloadl Free.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Acronis True Image 2018 Build 11530 Universal Restore BootCD Downloadl Free.md deleted file mode 100644 index 4a725384129efe1aeea07bae62760320997a06f0..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Acronis True Image 2018 Build 11530 Universal Restore BootCD Downloadl Free.md +++ /dev/null @@ -1,176 +0,0 @@ -
Acronis True Image 2018 Build 11530 Universal Restore BootCD Download

If you are looking for reliable and easy-to-use backup and recovery software, you might want to consider Acronis True Image 2018. This software allows you to create a complete or partial backup of your system or data and restore it after a disaster such as a virus attack, a hardware failure, or human error. You can also use it to migrate your system to new or different hardware, thanks to the Acronis Universal Restore tool, which lets you restore your system to dissimilar hardware even if it has different drivers, chipsets, or controllers.

In this article, we will show you how to download and use Acronis True Image 2018 Build 11530 Universal Restore BootCD, a bootable medium that contains both Acronis True Image 2018 and Acronis Universal Restore. With this bootable medium, you can back up, restore, or migrate your system without installing any software on your hard drive. You can also use it to access your system when it is unbootable or corrupted.

            Here are the main points of the article:

• What is Acronis True Image 2018?
• What is Acronis Universal Restore?
• How to download Acronis True Image 2018 Build 11530 Universal Restore BootCD?
• How to use Acronis True Image 2018 Build 11530 Universal Restore BootCD?
• Conclusion
• FAQs

What is Acronis True Image 2018?

Acronis True Image 2018 is backup and recovery software that lets you create a full or incremental backup of your system or data. A full backup is a complete copy of your system or data, while an incremental backup is a copy of only the changes made since the last backup (a toy sketch of this distinction follows the feature list below). You can store your backups on various destinations, such as an external hard drive, a network share, cloud storage, or an optical disc.

Acronis True Image 2018 also allows you to restore your system or data from a backup after a disaster. You can restore to the same or different hardware, as long as it is compatible with your operating system, and you can restore to a specific point in time using the Acronis Backup Explorer, which shows the timeline of your backups and the changes made in each one.

Some of the features and benefits of Acronis True Image 2018 are:

• Active Protection: protects your system and data from ransomware attacks by detecting and blocking malicious processes and restoring affected files.
• Active Cloning: clones your system or data to another drive without stopping or restarting your system.
• Active Disk Cleanup: frees up disk space by deleting unnecessary files, such as temporary files, cache files, or recycle bin contents.
• Crypto Mining Blocker: prevents crypto-mining malware from using your system resources and slowing down performance.
• WinPE Media Builder: creates a bootable medium with Windows Preinstallation Environment (WinPE), a lightweight version of Windows that can run on any hardware.
• Backup Statistics and Activity Dashboard: monitor and manage your backups by showing the size, speed, status, and duration of each backup.
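To make the full vs. incremental distinction above concrete, here is a minimal sketch of the general idea (not Acronis's actual implementation; the paths and helper name are hypothetical): an incremental pass copies only the files modified since the previous backup.

import shutil
import time
from pathlib import Path

def incremental_backup(src: Path, dst: Path, last_backup_time: float) -> int:
    """Toy incremental pass: copy only files modified since the previous
    backup. A full backup would copy every file regardless of mtime."""
    copied = 0
    for f in src.rglob("*"):
        if f.is_file() and f.stat().st_mtime > last_backup_time:
            target = dst / f.relative_to(src)
            target.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(f, target)  # copy2 preserves timestamps
            copied += 1
    return copied

# e.g. pick up changes from the last 24 hours (paths are hypothetical):
# incremental_backup(Path("C:/data"), Path("E:/backup"), time.time() - 86400)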

How to create a backup with Acronis True Image 2018?

To create a backup with Acronis True Image 2018, follow these steps:

1. Launch Acronis True Image 2018 on your system.
2. Select Backup from the left menu.
3. Select the source that you want to back up. You can choose from Entire PC, Disk and partitions, Files and folders, Social media accounts, or Mobile devices.
4. Select the destination where you want to store your backup. You can choose from Acronis Cloud, Browse, Create new backup plan, or Select existing backup plan.
5. Select the backup options that you want to apply. You can choose from Schedule, Scheme, Cleanup rules, Password protection, Email notifications, or Advanced options.
6. Click on Back up now to start the backup operation, or click on Back up later to save the backup plan and run it later.

You can view the progress and status of your backup in the Backup tab. You can also pause, resume, or stop the backup at any time.

How to restore your system or data with Acronis True Image 2018?

To restore your system or data with Acronis True Image 2018, follow these steps:

1. Launch Acronis True Image 2018 on your system.
2. Select Recovery from the left menu.
3. Select the backup that you want to restore from. You can choose from Acronis Cloud, Browse, or Select existing backup plan.
4. Select the data that you want to restore. You can choose from Entire PC, Disk and partitions, Files and folders, Social media accounts, or Mobile devices.
5. Select the destination where you want to restore your data. You can choose from Original location, New location, or Create new backup plan.
6. Select the recovery options that you want to apply. You can choose from Recovery method, Password protection, Email notifications, or Advanced options.
7. Click on Recover now to start the recovery operation, or click on Recover later to save the recovery plan and run it later.

You can view the progress and status of your recovery in the Recovery tab. You can also pause, resume, or stop the recovery at any time.

What is Acronis Universal Restore?

Acronis Universal Restore is a tool that allows you to restore your system to dissimilar hardware, even if it has different drivers, chipsets, or controllers. It is useful when you want to migrate your system to new or different hardware, or when you need to recover your system after a hardware failure.

Acronis Universal Restore works by injecting the appropriate drivers into the restored system so that it can boot and operate on the new hardware. You can provide the drivers manually or let Acronis Universal Restore search for them automatically. You can also customize the restored system's settings, such as the computer name, domain name, IP address, or license key.

Some of the features and benefits of Acronis Universal Restore are:

• Cross-platform compatibility: restore your system to any hardware platform, regardless of its manufacturer, model, or architecture.
• Failsafe mode: boot your restored system in a safe mode in case of driver issues or conflicts.
• Disk signature preservation: preserve the disk signature of your restored system, which is useful for applications that rely on it for licensing or activation.
• Disk partition alignment: align the disk partitions of your restored system, which improves the performance and lifespan of solid-state drives (SSDs).
• Disk conversion: convert the disk type of your restored system from basic to dynamic, or vice versa.
• File system conversion: convert the file system of your restored system from FAT to NTFS, or vice versa.

How to create a bootable medium with Acronis Universal Restore?

To create a bootable medium with Acronis Universal Restore, follow these steps:

1. Launch Acronis True Image 2018 on your system.
2. Select Tools from the left menu.
3. Select Rescue Media Builder from the list of tools.
4. Select Simple or Advanced mode. Simple mode creates a bootable medium with the default settings, while Advanced mode lets you customize the settings.
5. Select the type of bootable medium that you want to create. You can choose from CD/DVD, USB flash drive, ISO file, or WIM file.
6. Select the components that you want to include. You can choose from Acronis True Image 2018, Acronis Universal Restore, or Acronis System Report.
7. Select the drivers that you want to add. You can choose from Browse for drivers, Add drivers from this PC, or Add drivers from Acronis driver database.
8. Select the destination where you want to save or burn the bootable medium.
9. Click on Proceed to start creating the bootable medium.

You can view the progress and status in the Rescue Media Builder window. You can also cancel the operation at any time.

How to restore your system to dissimilar hardware with Acronis Universal Restore?

To restore your system to dissimilar hardware with Acronis Universal Restore, follow these steps:

1. Boot your system from the bootable medium that contains Acronis Universal Restore.
2. Select Acronis True Image 2018 from the boot menu.
3. Select Recovery from the left menu.
4. Select the backup that contains the system you want to restore.
5. Select the disk or partition that contains the system you want to restore.
6. Select the destination where you want to restore your system. You can choose from New location, Create new backup plan, or Browse for backup plan.
7. Select the recovery options that you want to apply. You can choose from Password protection, Email notifications, or Advanced options.
8. Select Acronis Universal Restore from the recovery options.
9. Select the drivers that you want to inject into the restored system. You can choose from Browse for drivers, Add drivers from this PC, Add drivers from Acronis driver database, or Search for drivers automatically.
10. Select the system settings that you want to customize for the restored system. You can choose from Computer name, Domain name, IP address, or License key.
11. Click on Recover now to start the universal restore operation.

You can view the progress and status of the universal restore in the Recovery tab. You can also pause, resume, or stop the operation at any time.

How to download Acronis True Image 2018 Build 11530 Universal Restore BootCD?

To download Acronis True Image 2018 Build 11530 Universal Restore BootCD, you have two options:

• Download it from the official Acronis website by logging in with your account and opening the Downloads section. You will need a valid license key for Acronis True Image 2018 to download the bootable medium.
• Download it from other sources, such as torrent sites or file-sharing platforms. However, be careful about the authenticity and integrity of the downloaded file, as it may contain viruses or malware.

The downloaded file is about 600 MB and is in ISO format. You should verify its integrity before burning it to a CD or USB drive.

How to verify the integrity of the downloaded file?

To verify the integrity of the downloaded file, check its checksum or signature. A checksum is a string of numbers and letters generated by a hash algorithm from the content of the file. A signature is a digital certificate issued by a trusted authority, based on the identity of the file creator. Both methods ensure that the file has not been tampered with or corrupted during the download.

To check the checksum or signature of the downloaded file, follow these steps:

1. Download a tool that can calculate or verify checksums or signatures, such as HashCalc, WinMD5Free, or Gpg4win.
2. Install and run the tool on your system.
3. Select the downloaded file and choose the checksum or signature algorithm that matches the one provided by the source. For example, if the source provides a SHA-256 checksum, choose SHA-256 as well.
4. Compare the calculated or verified checksum or signature with the one provided by the source. If they match, the file is authentic and intact. If they do not match, the file is corrupted or modified.

You can also use online tools to check the checksum or signature of the downloaded file, such as Online MD5, Online SHA-256, or Online Signature Verifier. However, be careful about uploading sensitive or confidential files to these tools, as they may store or share your data without your consent.
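If you would rather not upload the file anywhere, a few lines of Python compute the same digest locally. A minimal sketch, assuming the downloaded ISO sits in the working directory (the file name and published digest are placeholders):

import hashlib

def sha256sum(path: str, chunk_size: int = 1 << 20) -> str:
    """SHA-256 of a file, read in 1 MB chunks so even a ~600 MB ISO
    never has to fit in memory at once."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Compare against the checksum published by the download source
# (file name and digest are placeholders):
# assert sha256sum("AcronisBootCD.iso") == "<published sha-256 digest>"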

How to burn the downloaded file to a CD or USB drive?

To burn the downloaded file to a CD or USB drive, follow these steps:

1. Download a tool that can burn ISO files to CDs or USB drives, such as Rufus, PowerISO, or ImgBurn.
2. Install and run the tool on your system.
3. Insert a blank CD or USB drive into your system.
4. Select the downloaded file and choose the CD or USB drive as the destination.
5. Select the burning options that you want to apply. You can choose from Write speed, File system, Bootable, or Advanced options.
6. Click on Start to start burning the file to the CD or USB drive.

You can view the progress and status of burning the file in the tool window. You can also cancel the operation at any time.
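For reference, writing an ISO to a USB stick is, at its core, a raw byte-for-byte copy onto the device; dedicated tools such as Rufus add verification and bootloader handling on top. A hedged sketch of the raw copy only (the device path is a placeholder; this overwrites the device, requires administrator/root rights, and produces a bootable stick only for hybrid ISO images):

import shutil

def write_image(iso_path: str, device: str, buffer_mb: int = 4) -> None:
    """dd-style raw copy of an ISO image onto a block device.
    WARNING: this overwrites the device; double-check the path."""
    with open(iso_path, "rb") as src, open(device, "wb") as dst:
        shutil.copyfileobj(src, dst, length=buffer_mb * 1024 * 1024)

# e.g. on Linux, as root, with a hypothetical device path:
# write_image("AcronisBootCD.iso", "/dev/sdX")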

How to use Acronis True Image 2018 Build 11530 Universal Restore BootCD?

To use Acronis True Image 2018 Build 11530 Universal Restore BootCD, follow these steps:

1. Insert the CD or USB drive that contains Acronis True Image 2018 Build 11530 Universal Restore BootCD into your system.
2. Restart your system and enter the BIOS or UEFI settings.
3. Change the boot order to make the CD or USB drive the first boot device.
4. Save and exit the BIOS or UEFI settings.
5. Your system will boot from Acronis True Image 2018 Build 11530 Universal Restore BootCD and show you a boot menu.
6. Select Acronis True Image 2018 from the boot menu to access the backup and recovery software.
7. Select Backup or Recovery from the left menu to back up or restore your system or data.
8. Select Acronis Universal Restore from the recovery options to restore your system to dissimilar hardware.
9. Follow the instructions on the screen to complete the backup, recovery, or universal restore operation.

Conclusion

In this article, we have shown how to download and use Acronis True Image 2018 Build 11530 Universal Restore BootCD, a bootable medium that contains both Acronis True Image 2018 and Acronis Universal Restore. With it, you can back up, restore, or migrate your system without installing any software on your hard drive, and you can access your system when it is unbootable or corrupted.

We hope this article has been helpful. If you have any questions or feedback, please feel free to contact us.

If you want to download Acronis True Image 2018 Build 11530 Universal Restore BootCD, remember that you will need a valid license key for Acronis True Image 2018 to obtain the bootable medium, and a blank CD or USB drive to burn it.

FAQs

Here are some frequently asked questions and answers about Acronis True Image 2018 Build 11530 Universal Restore BootCD:

1. Q: What are the system requirements for Acronis True Image 2018 Build 11530 Universal Restore BootCD?

   A: The system requirements are:

   • A PC with a 64-bit processor that supports SSE instructions.
   • At least 1 GB of RAM.
   • A CD or USB drive with at least 600 MB of free space.
   • A monitor with a resolution of at least 1024 x 768 pixels.
   • A mouse or a similar pointing device.
   • An Internet connection for downloading the bootable medium and activating the software.

2. Q: How long does it take to back up or restore my system with Acronis True Image 2018 Build 11530 Universal Restore BootCD?

   A: The time depends on several factors, such as:

   • The size and speed of your source and destination drives.
   • The type and level of compression and encryption applied to your backup.
   • The amount and type of data that you back up or restore.
   • The performance and load of your system and network.

   As a general rule, you can expect a speed of about 1 GB per minute, though this varies with the factors above (see the short estimate sketch after this FAQ list).

3. Q: Can I use Acronis True Image 2018 Build 11530 Universal Restore BootCD to back up or restore other operating systems, such as Linux or Mac OS?

   A: No. It is designed to work only with Windows operating systems, from Windows XP to Windows 10. For other operating systems, use other Acronis products, such as Acronis Backup for Linux or Acronis True Image for Mac.

4. Q: Can I use Acronis True Image 2018 Build 11530 Universal Restore BootCD to back up or restore my mobile devices, such as smartphones or tablets?

   A: Yes. Install the Acronis Mobile app on your mobile device and connect it to the same network as your PC. Then select your mobile device as the source or destination for your backup or recovery. You can back up or restore contacts, messages, photos, videos, music, documents, and apps.

5. Q: How can I get technical support for Acronis True Image 2018 Build 11530 Universal Restore BootCD?

   A: Contact the Acronis Customer Support team at https://www.acronis.com/en-us/support/. You can also access the online user guide, knowledge base, forum, chat, phone, or email support from there. You will need to provide your license key and product version when contacting the support team.
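The "1 GB per minute" rule of thumb in the second answer translates directly into a rough duration estimate; a trivial sketch with hypothetical numbers:

def estimate_minutes(size_gb: float, gb_per_minute: float = 1.0) -> float:
    """Rough backup/restore duration from the ~1 GB per minute rule of
    thumb quoted in the FAQ above; real throughput varies widely."""
    return size_gb / gb_per_minute

# A hypothetical 250 GB system drive at the rule-of-thumb rate:
print(estimate_minutes(250))  # -> 250.0 minutes, i.e. just over 4 hours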

            \ No newline at end of file diff --git a/spaces/ofig/live-lm-critic/gec/download_data.sh b/spaces/ofig/live-lm-critic/gec/download_data.sh deleted file mode 100644 index 2336bcf975861439cca7ef934d13b8bec6ea53b4..0000000000000000000000000000000000000000 --- a/spaces/ofig/live-lm-critic/gec/download_data.sh +++ /dev/null @@ -1,43 +0,0 @@ -conda activate errant200 - - -######################## Set up benckmarks ######################## -mkdir -p benchmarks -cd benchmarks - -#Prepare CoNLL2014 -wget https://www.comp.nus.edu.sg/~nlp/conll14st/conll14st-test-data.tar.gz -tar -xf conll14st-test-data.tar.gz -python3 scripts/get_orig_from_m2.py conll14st-test-data/noalt/official-2014.combined.m2 \ - -out conll14st-test-data/noalt/official-2014.combined.orig.txt - - -#Prepare BEA2019 -wget https://www.cl.cam.ac.uk/research/nl/bea2019st/data/wi+locness_v2.1.bea19.tar.gz -tar -xf wi+locness_v2.1.bea19.tar.gz -mv wi+locness wi+locness_v2.1.bea19 -python3 scripts/get_orig_from_m2.py wi+locness_v2.1.bea19/m2/ABCN.dev.gold.bea19.m2 \ - -out wi+locness_v2.1.bea19/m2/ABCN.dev.bea19.orig.txt - - -#Prepare GMEG-wiki and -yahoo -git clone https://github.com/grammarly/GMEG.git -root=GMEG/data/test/wiki -errant_parallel -orig $root/source \ - -cor $root/ref0 $root/ref1 $root/ref2 $root/ref3 \ - -out $root/ref.m2 - -root=GMEG/data/test/yahoo -errant_parallel -orig $root/source \ - -cor $root/ref0 $root/ref1 $root/ref2 $root/ref3 \ - -out $root/ref.m2 - - -#Download M2 scorer -git clone https://github.com/nusnlp/m2scorer.git - - -######################## Download training data ######################## -cd ../ -wget https://nlp.stanford.edu/projects/myasu/LM-Critic/data.zip -unzip data.zip diff --git a/spaces/onursavas/meta-llama-2-7b-hf/README.md b/spaces/onursavas/meta-llama-2-7b-hf/README.md deleted file mode 100644 index d7c0f31d8191641706e266c7d3aab715788691bb..0000000000000000000000000000000000000000 --- a/spaces/onursavas/meta-llama-2-7b-hf/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Meta Llama Llama 2 7b Hf -emoji: 😻 -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 3.37.0 -app_file: app.py -pinned: false -duplicated_from: maheshwaranumapathy/meta-llama-Llama-2-7b-hf ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/openMUSE/MUSE-vs-SD.1.5/app.py b/spaces/openMUSE/MUSE-vs-SD.1.5/app.py deleted file mode 100644 index 8f283f168e6514c9acf80da1ca95771234671f0c..0000000000000000000000000000000000000000 --- a/spaces/openMUSE/MUSE-vs-SD.1.5/app.py +++ /dev/null @@ -1,146 +0,0 @@ -import gradio as gr -from PIL import Image -import torch -from muse import PipelineMuse -from diffusers import AutoPipelineForText2Image, UniPCMultistepScheduler - -PAD_TOKEN = "<|endoftext|>" - -muse_256 = PipelineMuse.from_pretrained("openMUSE/muse-256").to("cuda", dtype=torch.float16) -muse_256.transformer.enable_xformers_memory_efficient_attention() -muse_256.tokenizer.pad_token = PAD_TOKEN - -text_encoder = muse_256.text_encoder -vae = muse_256.vae - -muse_512 = PipelineMuse.from_pretrained("openMUSE/muse-512", text_encoder=text_encoder, vae=vae).to("cuda", dtype=torch.float16) -muse_512.transformer.enable_xformers_memory_efficient_attention() -muse_512.tokenizer.pad_token = PAD_TOKEN - -muse_512_fine = PipelineMuse.from_pretrained("openMUSE/muse-512-finetuned", text_encoder=text_encoder, vae=vae).to("cuda", dtype=torch.float16) -muse_512_fine.transformer.enable_xformers_memory_efficient_attention() 
-muse_512_fine.tokenizer.pad_token = PAD_TOKEN - -sdv1_5 = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None, variant="fp16").to("cuda", torch_dtype=torch.float16) -sdv1_5.scheduler = UniPCMultistepScheduler.from_config(sdv1_5.scheduler.config) -sdv1_5.enable_xformers_memory_efficient_attention() - -def infer(prompt): - print("Generating 256:") - muse_256_image = muse_256( - prompt, timesteps=16, guidance_scale=10, transformer_seq_len=256, use_fp16=True, temperature=(2, 0), - )[0] - - print("Generating 512:") - muse_512_image = muse_512( - prompt, timesteps=16, guidance_scale=10, transformer_seq_len=1024, use_fp16=True, temperature=(2, 0), - )[0] - - print("Generating 512 fine-tuned:") - muse_512_fine_image = muse_512_fine( - prompt, timesteps=16, guidance_scale=10, transformer_seq_len=1024, use_fp16=True, temperature=(2, 0), - )[0] - - print("Generating SD v15 fine-tuned:") - sdv1_5_image = sdv1_5(prompt, num_inference_steps=25).images[0] - - # First SDv15, then 256 base, then 512 base, then 512 fine-tuned - images = [sdv1_5_image, muse_256_image, muse_512_image, muse_512_fine_image] - - return images - - -examples = [ - [ - "A small cabin on top of a snowy mountain in the style of Disney, artstation", - ], - [ - "a monkey doing yoga on the beach", - ], - [ - "half human half cat, a human cat hybrid", - ], - [ - "a hedgehog using a calculator", - ], - [ - "kanye west | diffuse lighting | fantasy | intricate elegant highly detailed lifelike photorealistic digital painting | artstation", - ], - [ - "astronaut pig", - ], - [ - "two people shouting at each other", - ], - [ - "A linked in profile picture of Elon Musk", - ], - [ - "A man looking out of a rainy window", - ], - [ - "close up, iron man, eating breakfast in a cabin, symmetrical balance, hyper-realistic --ar 16:9 --style raw" - ], - [ - 'A high tech solarpunk utopia in the Amazon rainforest', - ], - [ - 'A pikachu fine dining with a view to the Eiffel Tower', - ], - [ - 'A mecha robot in a favela in expressionist style', - ], - [ - 'an insect robot preparing a delicious meal', - ], -] - - -css = """ -h1 { - text-align: center; -} - -#component-0 { - max-width: 730px; - margin: auto; -} -""" - -block = gr.Blocks(css=css) - -with block: - gr.Markdown("SDv15 vs. 
MUSE.") - with gr.Group(): - with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True): - with gr.Column(): - text = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=1, - placeholder="Enter your prompt", - container=False, - ) - btn = gr.Button("Generate image", scale=0) - - with gr.Row(): - with gr.Column(min_width=256) as c1: - image_1 = gr.Image(interactive=False) - image_1_label = gr.Markdown("SD V1-5") - with gr.Column(min_width=256) as c2: - image_2 = gr.Image(interactive=False) - image_2_label = gr.Markdown("MUSE-Base-256") - with gr.Column(min_width=256) as c2: - image_3 = gr.Image(interactive=False) - image_3_label = gr.Markdown("MUSE-Base-512") - with gr.Column(min_width=256) as c3: - image_4 = gr.Image(interactive=False) - image_4_label = gr.Markdown("MUSE-Finetuned-512") - - ex = gr.Examples(examples=examples, fn=infer, inputs=[text], outputs=[image_1, image_2, image_3, image_4], cache_examples=False) - ex.dataset.headers = [""] - - text.submit(infer, inputs=[text], outputs=[image_1, image_2, image_3, image_4]) - btn.click(infer, inputs=[text], outputs=[image_1, image_2, image_3, image_4]) - -block.launch() diff --git a/spaces/osanseviero/voice-cloning-public/Makefile b/spaces/osanseviero/voice-cloning-public/Makefile deleted file mode 100644 index ad23323414bd2175956f6aef92f223a02f7258be..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/voice-cloning-public/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -.PHONY: quality style - -# Check that source code meets quality standards -quality: - black --check --diff . - ruff . - -# Format source code automatically -style: - black . - ruff . --fix diff --git a/spaces/oyl344531959/White-box-Cartoonization/README.md b/spaces/oyl344531959/White-box-Cartoonization/README.md deleted file mode 100644 index 9860239cf42c94e385faaaa75a85311e010d64f7..0000000000000000000000000000000000000000 --- a/spaces/oyl344531959/White-box-Cartoonization/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -python_version: 3.7 -title: White Box Cartoonization -emoji: 📚 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: hylee/White-box-Cartoonization ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/dreambooth/train_dreambooth_lora.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/dreambooth/train_dreambooth_lora.py deleted file mode 100644 index dc90d10f2b26c4be4fb49c92d171375038c4b3f8..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/dreambooth/train_dreambooth_lora.py +++ /dev/null @@ -1,1425 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and - -import argparse -import copy -import gc -import hashlib -import itertools -import logging -import math -import os -import shutil -import warnings -from pathlib import Path -from typing import Dict - -import numpy as np -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -import transformers -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import ProjectConfiguration, set_seed -from huggingface_hub import create_repo, upload_folder -from packaging import version -from PIL import Image -from PIL.ImageOps import exif_transpose -from torch.utils.data import Dataset -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import AutoTokenizer, PretrainedConfig - -import diffusers -from diffusers import ( - AutoencoderKL, - DDPMScheduler, - DiffusionPipeline, - DPMSolverMultistepScheduler, - StableDiffusionPipeline, - UNet2DConditionModel, -) -from diffusers.loaders import ( - LoraLoaderMixin, - text_encoder_lora_state_dict, -) -from diffusers.models.attention_processor import ( - AttnAddedKVProcessor, - AttnAddedKVProcessor2_0, - LoRAAttnAddedKVProcessor, - LoRAAttnProcessor, - LoRAAttnProcessor2_0, - SlicedAttnAddedKVProcessor, -) -from diffusers.optimization import get_scheduler -from diffusers.utils import check_min_version, is_wandb_available -from diffusers.utils.import_utils import is_xformers_available - - -# Will error if the minimal version of diffusers is not installed. Remove at your own risks. -check_min_version("0.22.0.dev0") - -logger = get_logger(__name__) - - -def save_model_card( - repo_id: str, - images=None, - base_model=str, - train_text_encoder=False, - prompt=str, - repo_folder=None, - pipeline: DiffusionPipeline = None, -): - img_str = "" - for i, image in enumerate(images): - image.save(os.path.join(repo_folder, f"image_{i}.png")) - img_str += f"![img_{i}](./image_{i}.png)\n" - - yaml = f""" ---- -license: creativeml-openrail-m -base_model: {base_model} -instance_prompt: {prompt} -tags: -- {'stable-diffusion' if isinstance(pipeline, StableDiffusionPipeline) else 'if'} -- {'stable-diffusion-diffusers' if isinstance(pipeline, StableDiffusionPipeline) else 'if-diffusers'} -- text-to-image -- diffusers -- lora -inference: true ---- - """ - model_card = f""" -# LoRA DreamBooth - {repo_id} - -These are LoRA adaption weights for {base_model}. The weights were trained on {prompt} using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. \n -{img_str} - -LoRA for the text encoder was enabled: {train_text_encoder}. 
-""" - with open(os.path.join(repo_folder, "README.md"), "w") as f: - f.write(yaml + model_card) - - -def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): - text_encoder_config = PretrainedConfig.from_pretrained( - pretrained_model_name_or_path, - subfolder="text_encoder", - revision=revision, - ) - model_class = text_encoder_config.architectures[0] - - if model_class == "CLIPTextModel": - from transformers import CLIPTextModel - - return CLIPTextModel - elif model_class == "RobertaSeriesModelWithTransformation": - from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation - - return RobertaSeriesModelWithTransformation - elif model_class == "T5EncoderModel": - from transformers import T5EncoderModel - - return T5EncoderModel - else: - raise ValueError(f"{model_class} is not supported.") - - -def parse_args(input_args=None): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--revision", - type=str, - default=None, - required=False, - help="Revision of pretrained model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--instance_data_dir", - type=str, - default=None, - required=True, - help="A folder containing the training data of instance images.", - ) - parser.add_argument( - "--class_data_dir", - type=str, - default=None, - required=False, - help="A folder containing the training data of class images.", - ) - parser.add_argument( - "--instance_prompt", - type=str, - default=None, - required=True, - help="The prompt with identifier specifying the instance", - ) - parser.add_argument( - "--class_prompt", - type=str, - default=None, - help="The prompt to specify images in the same class as provided instance images.", - ) - parser.add_argument( - "--validation_prompt", - type=str, - default=None, - help="A prompt that is used during validation to verify that the model is learning.", - ) - parser.add_argument( - "--num_validation_images", - type=int, - default=4, - help="Number of images that should be generated during validation with `validation_prompt`.", - ) - parser.add_argument( - "--validation_epochs", - type=int, - default=50, - help=( - "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt" - " `args.validation_prompt` multiple times: `args.num_validation_images`." - ), - ) - parser.add_argument( - "--with_prior_preservation", - default=False, - action="store_true", - help="Flag to add prior preservation loss.", - ) - parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") - parser.add_argument( - "--num_class_images", - type=int, - default=100, - help=( - "Minimal class images for prior preservation loss. If there are not enough images already present in" - " class_data_dir, additional images will be sampled with class_prompt." 
- ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="lora-dreambooth-model", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", - default=False, - action="store_true", - help=( - "Whether to center crop the input images to the resolution. If not set, the images will be randomly" - " cropped. The images will be resized to the resolution first before cropping." - ), - ) - parser.add_argument( - "--train_text_encoder", - action="store_true", - help="Whether to train the text encoder. If set, the text encoder should be float32 precision.", - ) - parser.add_argument( - "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument( - "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--checkpointing_steps", - type=int, - default=500, - help=( - "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" - " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" - " training using `--resume_from_checkpoint`." - ), - ) - parser.add_argument( - "--checkpoints_total_limit", - type=int, - default=None, - help=("Max number of checkpoints to store."), - ) - parser.add_argument( - "--resume_from_checkpoint", - type=str, - default=None, - help=( - "Whether training should be resumed from a previous checkpoint. Use a path saved by" - ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' - ), - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-4, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." 
- ) - parser.add_argument( - "--lr_num_cycles", - type=int, - default=1, - help="Number of hard resets of the lr in cosine_with_restarts scheduler.", - ) - parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") - parser.add_argument( - "--dataloader_num_workers", - type=int, - default=0, - help=( - "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." - ), - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." - ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--allow_tf32", - action="store_true", - help=( - "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" - " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" - ), - ) - parser.add_argument( - "--report_to", - type=str, - default="tensorboard", - help=( - 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' - ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default=None, - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" - " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" - " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." - ), - ) - parser.add_argument( - "--prior_generation_precision", - type=str, - default=None, - choices=["no", "fp32", "fp16", "bf16"], - help=( - "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" - " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." - ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - parser.add_argument( - "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." - ) - parser.add_argument( - "--pre_compute_text_embeddings", - action="store_true", - help="Whether or not to pre-compute text embeddings. 
If text embeddings are pre-computed, the text encoder will not be kept in memory during training and will leave more GPU memory available for training the rest of the model. This is not compatible with `--train_text_encoder`.", - ) - parser.add_argument( - "--tokenizer_max_length", - type=int, - default=None, - required=False, - help="The maximum length of the tokenizer. If not set, will default to the tokenizer's max length.", - ) - parser.add_argument( - "--text_encoder_use_attention_mask", - action="store_true", - required=False, - help="Whether to use attention mask for the text encoder", - ) - parser.add_argument( - "--validation_images", - required=False, - default=None, - nargs="+", - help="Optional set of images to use for validation. Used when the target pipeline takes an initial image as input such as when training image variation or superresolution.", - ) - parser.add_argument( - "--class_labels_conditioning", - required=False, - default=None, - help="The optional `class_label` conditioning to pass to the unet, available values are `timesteps`.", - ) - parser.add_argument( - "--rank", - type=int, - default=4, - help=("The dimension of the LoRA update matrices."), - ) - - if input_args is not None: - args = parser.parse_args(input_args) - else: - args = parser.parse_args() - - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - if args.with_prior_preservation: - if args.class_data_dir is None: - raise ValueError("You must specify a data directory for class images.") - if args.class_prompt is None: - raise ValueError("You must specify prompt for class images.") - else: - # logger is not available yet - if args.class_data_dir is not None: - warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") - if args.class_prompt is not None: - warnings.warn("You need not use --class_prompt without --with_prior_preservation.") - - if args.train_text_encoder and args.pre_compute_text_embeddings: - raise ValueError("`--train_text_encoder` cannot be used with `--pre_compute_text_embeddings`") - - return args - - -class DreamBoothDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and the tokenizes prompts. 
- """ - - def __init__( - self, - instance_data_root, - instance_prompt, - tokenizer, - class_data_root=None, - class_prompt=None, - class_num=None, - size=512, - center_crop=False, - encoder_hidden_states=None, - class_prompt_encoder_hidden_states=None, - tokenizer_max_length=None, - ): - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - self.encoder_hidden_states = encoder_hidden_states - self.class_prompt_encoder_hidden_states = class_prompt_encoder_hidden_states - self.tokenizer_max_length = tokenizer_max_length - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exists.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self.instance_prompt = instance_prompt - self._length = self.num_instance_images - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - if class_num is not None: - self.num_class_images = min(len(self.class_images_path), class_num) - else: - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - self.class_prompt = class_prompt - else: - self.class_data_root = None - - self.image_transforms = transforms.Compose( - [ - transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) - instance_image = exif_transpose(instance_image) - - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - example["instance_images"] = self.image_transforms(instance_image) - - if self.encoder_hidden_states is not None: - example["instance_prompt_ids"] = self.encoder_hidden_states - else: - text_inputs = tokenize_prompt( - self.tokenizer, self.instance_prompt, tokenizer_max_length=self.tokenizer_max_length - ) - example["instance_prompt_ids"] = text_inputs.input_ids - example["instance_attention_mask"] = text_inputs.attention_mask - - if self.class_data_root: - class_image = Image.open(self.class_images_path[index % self.num_class_images]) - class_image = exif_transpose(class_image) - - if not class_image.mode == "RGB": - class_image = class_image.convert("RGB") - example["class_images"] = self.image_transforms(class_image) - - if self.class_prompt_encoder_hidden_states is not None: - example["class_prompt_ids"] = self.class_prompt_encoder_hidden_states - else: - class_text_inputs = tokenize_prompt( - self.tokenizer, self.class_prompt, tokenizer_max_length=self.tokenizer_max_length - ) - example["class_prompt_ids"] = class_text_inputs.input_ids - example["class_attention_mask"] = class_text_inputs.attention_mask - - return example - - -def collate_fn(examples, with_prior_preservation=False): - has_attention_mask = "instance_attention_mask" in examples[0] - - input_ids = [example["instance_prompt_ids"] for example in examples] - pixel_values = [example["instance_images"] for example in examples] - - if has_attention_mask: - attention_mask = [example["instance_attention_mask"] for example in 
examples] - - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. - if with_prior_preservation: - input_ids += [example["class_prompt_ids"] for example in examples] - pixel_values += [example["class_images"] for example in examples] - if has_attention_mask: - attention_mask += [example["class_attention_mask"] for example in examples] - - pixel_values = torch.stack(pixel_values) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - input_ids = torch.cat(input_ids, dim=0) - - batch = { - "input_ids": input_ids, - "pixel_values": pixel_values, - } - - if has_attention_mask: - batch["attention_mask"] = attention_mask - - return batch - - -class PromptDataset(Dataset): - "A simple dataset to prepare the prompts to generate class images on multiple GPUs." - - def __init__(self, prompt, num_samples): - self.prompt = prompt - self.num_samples = num_samples - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - example = {} - example["prompt"] = self.prompt - example["index"] = index - return example - - -def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None): - if tokenizer_max_length is not None: - max_length = tokenizer_max_length - else: - max_length = tokenizer.model_max_length - - text_inputs = tokenizer( - prompt, - truncation=True, - padding="max_length", - max_length=max_length, - return_tensors="pt", - ) - - return text_inputs - - -def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_attention_mask=None): - text_input_ids = input_ids.to(text_encoder.device) - - if text_encoder_use_attention_mask: - attention_mask = attention_mask.to(text_encoder.device) - else: - attention_mask = None - - prompt_embeds = text_encoder( - text_input_ids, - attention_mask=attention_mask, - ) - prompt_embeds = prompt_embeds[0] - - return prompt_embeds - - -def unet_attn_processors_state_dict(unet) -> Dict[str, torch.tensor]: - r""" - Returns: - a state dict containing just the attention processor parameters. - """ - attn_processors = unet.attn_processors - - attn_processors_state_dict = {} - - for attn_processor_key, attn_processor in attn_processors.items(): - for parameter_key, parameter in attn_processor.state_dict().items(): - attn_processors_state_dict[f"{attn_processor_key}.{parameter_key}"] = parameter - - return attn_processors_state_dict - - -def main(args): - logging_dir = Path(args.output_dir, args.logging_dir) - - accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) - - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with=args.report_to, - project_config=accelerator_project_config, - ) - - if args.report_to == "wandb": - if not is_wandb_available(): - raise ImportError("Make sure to install wandb if you want to use it for logging during training.") - import wandb - - # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate - # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. - # TODO (sayakpaul): Remove this check when gradient accumulation with two models is enabled in accelerate. 
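As an aside on the two text helpers defined above: `tokenize_prompt` pads every prompt to a fixed length, and `encode_prompt` runs the frozen text encoder over the resulting ids. A minimal, hedged sketch of how they pair up — the checkpoint id and prompt below are illustrative placeholders, not values from this script:

```python
# Hedged sketch: pairing tokenize_prompt() with encode_prompt() as defined above.
# The checkpoint id and prompt are placeholders, not taken from this script.
import torch
from transformers import AutoTokenizer, CLIPTextModel

model_id = "runwayml/stable-diffusion-v1-5"  # assumed SD v1-style repo layout
tokenizer = AutoTokenizer.from_pretrained(model_id, subfolder="tokenizer", use_fast=False)
text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder")

with torch.no_grad():
    text_inputs = tokenize_prompt(tokenizer, "a photo of sks dog")
    prompt_embeds = encode_prompt(
        text_encoder,
        text_inputs.input_ids,
        text_inputs.attention_mask,
        text_encoder_use_attention_mask=False,  # CLIP is typically run without the mask here
    )

print(prompt_embeds.shape)  # e.g. torch.Size([1, 77, 768]) for SD v1's CLIP encoder
```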
- if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: - raise ValueError( - "Gradient accumulation is not supported when training the text encoder in distributed training. " - "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." - ) - - # Make one log on every process with the configuration for debugging. - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - logger.info(accelerator.state, main_process_only=False) - if accelerator.is_local_main_process: - transformers.utils.logging.set_verbosity_warning() - diffusers.utils.logging.set_verbosity_info() - else: - transformers.utils.logging.set_verbosity_error() - diffusers.utils.logging.set_verbosity_error() - - # If passed along, set the training seed now. - if args.seed is not None: - set_seed(args.seed) - - # Generate class images if prior preservation is enabled. - if args.with_prior_preservation: - class_images_dir = Path(args.class_data_dir) - if not class_images_dir.exists(): - class_images_dir.mkdir(parents=True) - cur_class_images = len(list(class_images_dir.iterdir())) - - if cur_class_images < args.num_class_images: - torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 - if args.prior_generation_precision == "fp32": - torch_dtype = torch.float32 - elif args.prior_generation_precision == "fp16": - torch_dtype = torch.float16 - elif args.prior_generation_precision == "bf16": - torch_dtype = torch.bfloat16 - pipeline = DiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - torch_dtype=torch_dtype, - safety_checker=None, - revision=args.revision, - ) - pipeline.set_progress_bar_config(disable=True) - - num_new_images = args.num_class_images - cur_class_images - logger.info(f"Number of class images to sample: {num_new_images}.") - - sample_dataset = PromptDataset(args.class_prompt, num_new_images) - sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) - - sample_dataloader = accelerator.prepare(sample_dataloader) - pipeline.to(accelerator.device) - - for example in tqdm( - sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process - ): - images = pipeline(example["prompt"]).images - - for i, image in enumerate(images): - hash_image = hashlib.sha1(image.tobytes()).hexdigest() - image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" - image.save(image_filename) - - del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Handle the repository creation - if accelerator.is_main_process: - if args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - if args.push_to_hub: - repo_id = create_repo( - repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token - ).repo_id - - # Load the tokenizer - if args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) - elif args.pretrained_model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained( - args.pretrained_model_name_or_path, - subfolder="tokenizer", - revision=args.revision, - use_fast=False, - ) - - # import correct text encoder class - text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) - - # Load scheduler and models - 
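For reference, the `import_model_class_from_model_name_or_path` call just above avoids hard-coding CLIP: it reads the architecture name out of the checkpoint's own text-encoder config and imports the matching class. A hedged one-off illustration of that dispatch (placeholder model id):

```python
# Hedged illustration of the text-encoder dispatch used above.
# The checkpoint id is a placeholder; any SD v1-style repo should resolve the same way.
from transformers import PretrainedConfig

cfg = PretrainedConfig.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder")
print(cfg.architectures[0])  # "CLIPTextModel" -> the script then imports that class
```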
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") - text_encoder = text_encoder_cls.from_pretrained( - args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision - ) - try: - vae = AutoencoderKL.from_pretrained( - args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision - ) - except OSError: - # IF does not have a VAE so let's just set it to None - # We don't have to error out here - vae = None - - unet = UNet2DConditionModel.from_pretrained( - args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision - ) - - # We only train the additional adapter LoRA layers - if vae is not None: - vae.requires_grad_(False) - text_encoder.requires_grad_(False) - unet.requires_grad_(False) - - # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision - # as these weights are only used for inference, keeping weights in full precision is not required. - weight_dtype = torch.float32 - if accelerator.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif accelerator.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move unet, vae and text_encoder to device and cast to weight_dtype - unet.to(accelerator.device, dtype=weight_dtype) - if vae is not None: - vae.to(accelerator.device, dtype=weight_dtype) - text_encoder.to(accelerator.device, dtype=weight_dtype) - - if args.enable_xformers_memory_efficient_attention: - if is_xformers_available(): - import xformers - - xformers_version = version.parse(xformers.__version__) - if xformers_version == version.parse("0.0.16"): - logger.warn( - "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." - ) - unet.enable_xformers_memory_efficient_attention() - else: - raise ValueError("xformers is not available. Make sure it is installed correctly") - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - if args.train_text_encoder: - text_encoder.gradient_checkpointing_enable() - - # now we will add new LoRA weights to the attention layers - # It's important to realize here how many attention weights will be added and of which sizes - # The sizes of the attention layers consist only of two different variables: - # 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`. - # 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`. - - # Let's first see how many attention processors we will have to set. 
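(As a hedged cross-check of that count, with a placeholder model id, one can simply ask the UNet; the enumeration that follows arrives at the same number:)

```python
# Hedged: count the attention processors of a stock SD v1 UNet (placeholder id).
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet"
)
print(len(unet.attn_processors))  # 32, matching the tally below
```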
- # For Stable Diffusion, it should be equal to: - # - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12 - # - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2 - # - up blocks (2x attention layers) * (3x transformer layers) * (3x down blocks) = 18 - # => 32 layers - - # Set correct lora layers - unet_lora_attn_procs = {} - unet_lora_parameters = [] - for name, attn_processor in unet.attn_processors.items(): - cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim - if name.startswith("mid_block"): - hidden_size = unet.config.block_out_channels[-1] - elif name.startswith("up_blocks"): - block_id = int(name[len("up_blocks.")]) - hidden_size = list(reversed(unet.config.block_out_channels))[block_id] - elif name.startswith("down_blocks"): - block_id = int(name[len("down_blocks.")]) - hidden_size = unet.config.block_out_channels[block_id] - - if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)): - lora_attn_processor_class = LoRAAttnAddedKVProcessor - else: - lora_attn_processor_class = ( - LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor - ) - - module = lora_attn_processor_class( - hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=args.rank - ) - unet_lora_attn_procs[name] = module - unet_lora_parameters.extend(module.parameters()) - - unet.set_attn_processor(unet_lora_attn_procs) - - # The text encoder comes from 🤗 transformers, so we cannot directly modify it. - # So, instead, we monkey-patch the forward calls of its attention-blocks. - if args.train_text_encoder: - # ensure that dtype is float32, even if rest of the model that isn't trained is loaded in fp16 - text_lora_parameters = LoraLoaderMixin._modify_text_encoder(text_encoder, dtype=torch.float32, rank=args.rank) - - # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format - def save_model_hook(models, weights, output_dir): - if accelerator.is_main_process: - # there are only two options here. 
Either are just the unet attn processor layers - # or there are the unet and text encoder atten layers - unet_lora_layers_to_save = None - text_encoder_lora_layers_to_save = None - - for model in models: - if isinstance(model, type(accelerator.unwrap_model(unet))): - unet_lora_layers_to_save = unet_attn_processors_state_dict(model) - elif isinstance(model, type(accelerator.unwrap_model(text_encoder))): - text_encoder_lora_layers_to_save = text_encoder_lora_state_dict(model) - else: - raise ValueError(f"unexpected save model: {model.__class__}") - - # make sure to pop weight so that corresponding model is not saved again - weights.pop() - - LoraLoaderMixin.save_lora_weights( - output_dir, - unet_lora_layers=unet_lora_layers_to_save, - text_encoder_lora_layers=text_encoder_lora_layers_to_save, - ) - - def load_model_hook(models, input_dir): - unet_ = None - text_encoder_ = None - - while len(models) > 0: - model = models.pop() - - if isinstance(model, type(accelerator.unwrap_model(unet))): - unet_ = model - elif isinstance(model, type(accelerator.unwrap_model(text_encoder))): - text_encoder_ = model - else: - raise ValueError(f"unexpected save model: {model.__class__}") - - lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir) - LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_) - LoraLoaderMixin.load_lora_into_text_encoder( - lora_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_ - ) - - accelerator.register_save_state_pre_hook(save_model_hook) - accelerator.register_load_state_pre_hook(load_model_hook) - - # Enable TF32 for faster training on Ampere GPUs, - # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices - if args.allow_tf32: - torch.backends.cuda.matmul.allow_tf32 = True - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." 
- ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - # Optimizer creation - params_to_optimize = ( - itertools.chain(unet_lora_parameters, text_lora_parameters) - if args.train_text_encoder - else unet_lora_parameters - ) - optimizer = optimizer_class( - params_to_optimize, - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - if args.pre_compute_text_embeddings: - - def compute_text_embeddings(prompt): - with torch.no_grad(): - text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=args.tokenizer_max_length) - prompt_embeds = encode_prompt( - text_encoder, - text_inputs.input_ids, - text_inputs.attention_mask, - text_encoder_use_attention_mask=args.text_encoder_use_attention_mask, - ) - - return prompt_embeds - - pre_computed_encoder_hidden_states = compute_text_embeddings(args.instance_prompt) - validation_prompt_negative_prompt_embeds = compute_text_embeddings("") - - if args.validation_prompt is not None: - validation_prompt_encoder_hidden_states = compute_text_embeddings(args.validation_prompt) - else: - validation_prompt_encoder_hidden_states = None - - if args.class_prompt is not None: - pre_computed_class_prompt_encoder_hidden_states = compute_text_embeddings(args.class_prompt) - else: - pre_computed_class_prompt_encoder_hidden_states = None - - text_encoder = None - tokenizer = None - - gc.collect() - torch.cuda.empty_cache() - else: - pre_computed_encoder_hidden_states = None - validation_prompt_encoder_hidden_states = None - validation_prompt_negative_prompt_embeds = None - pre_computed_class_prompt_encoder_hidden_states = None - - # Dataset and DataLoaders creation: - train_dataset = DreamBoothDataset( - instance_data_root=args.instance_data_dir, - instance_prompt=args.instance_prompt, - class_data_root=args.class_data_dir if args.with_prior_preservation else None, - class_prompt=args.class_prompt, - class_num=args.num_class_images, - tokenizer=tokenizer, - size=args.resolution, - center_crop=args.center_crop, - encoder_hidden_states=pre_computed_encoder_hidden_states, - class_prompt_encoder_hidden_states=pre_computed_class_prompt_encoder_hidden_states, - tokenizer_max_length=args.tokenizer_max_length, - ) - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, - batch_size=args.train_batch_size, - shuffle=True, - collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), - num_workers=args.dataloader_num_workers, - ) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, - num_training_steps=args.max_train_steps * accelerator.num_processes, - num_cycles=args.lr_num_cycles, - power=args.lr_power, - ) - - # Prepare everything with our `accelerator`. 
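The step bookkeeping just above is worth pinning down with numbers: one optimizer update consumes `gradient_accumulation_steps` dataloader batches, and `--max_train_steps`, when unset, is derived from the epoch count. A hedged worked example (all values invented):

```python
# Hedged arithmetic for the schedule math above (all numbers invented).
import math

num_batches = 200        # len(train_dataloader)
grad_accum = 4           # args.gradient_accumulation_steps
num_train_epochs = 2     # args.num_train_epochs

num_update_steps_per_epoch = math.ceil(num_batches / grad_accum)   # 50 optimizer steps
max_train_steps = num_train_epochs * num_update_steps_per_epoch    # 100 steps total
```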
- if args.train_text_encoder: - unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, text_encoder, optimizer, train_dataloader, lr_scheduler - ) - else: - unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, optimizer, train_dataloader, lr_scheduler - ) - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - tracker_config = vars(copy.deepcopy(args)) - tracker_config.pop("validation_images") - accelerator.init_trackers("dreambooth-lora", config=tracker_config) - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num batches each epoch = {len(train_dataloader)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - global_step = 0 - first_epoch = 0 - - # Potentially load in the weights and states from a previous save - if args.resume_from_checkpoint: - if args.resume_from_checkpoint != "latest": - path = os.path.basename(args.resume_from_checkpoint) - else: - # Get the mos recent checkpoint - dirs = os.listdir(args.output_dir) - dirs = [d for d in dirs if d.startswith("checkpoint")] - dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) - path = dirs[-1] if len(dirs) > 0 else None - - if path is None: - accelerator.print( - f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." - ) - args.resume_from_checkpoint = None - else: - accelerator.print(f"Resuming from checkpoint {path}") - accelerator.load_state(os.path.join(args.output_dir, path)) - global_step = int(path.split("-")[1]) - - resume_global_step = global_step * args.gradient_accumulation_steps - first_epoch = global_step // num_update_steps_per_epoch - resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) - - # Only show the progress bar once on each machine. 
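The resume arithmetic at the end of the block above converts the optimizer-step counter encoded in the checkpoint directory name back into raw dataloader batches to skip. A hedged worked example (values invented):

```python
# Hedged example of the resume bookkeeping above (numbers invented).
global_step = 130                 # parsed from a "checkpoint-130" directory name
grad_accum = 4                    # args.gradient_accumulation_steps
num_update_steps_per_epoch = 50   # optimizer updates per epoch

resume_global_step = global_step * grad_accum                       # 520 batches already consumed
first_epoch = global_step // num_update_steps_per_epoch             # resume inside epoch 2
resume_step = resume_global_step % (num_update_steps_per_epoch * grad_accum)  # skip 120 batches of epoch 2
```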
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) - progress_bar.set_description("Steps") - - for epoch in range(first_epoch, args.num_train_epochs): - unet.train() - if args.train_text_encoder: - text_encoder.train() - for step, batch in enumerate(train_dataloader): - # Skip steps until we reach the resumed step - if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: - if step % args.gradient_accumulation_steps == 0: - progress_bar.update(1) - continue - - with accelerator.accumulate(unet): - pixel_values = batch["pixel_values"].to(dtype=weight_dtype) - - if vae is not None: - # Convert images to latent space - model_input = vae.encode(pixel_values).latent_dist.sample() - model_input = model_input * vae.config.scaling_factor - else: - model_input = pixel_values - - # Sample noise that we'll add to the latents - noise = torch.randn_like(model_input) - bsz, channels, height, width = model_input.shape - # Sample a random timestep for each image - timesteps = torch.randint( - 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device - ) - timesteps = timesteps.long() - - # Add noise to the model input according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps) - - # Get the text embedding for conditioning - if args.pre_compute_text_embeddings: - encoder_hidden_states = batch["input_ids"] - else: - encoder_hidden_states = encode_prompt( - text_encoder, - batch["input_ids"], - batch["attention_mask"], - text_encoder_use_attention_mask=args.text_encoder_use_attention_mask, - ) - - if accelerator.unwrap_model(unet).config.in_channels == channels * 2: - noisy_model_input = torch.cat([noisy_model_input, noisy_model_input], dim=1) - - if args.class_labels_conditioning == "timesteps": - class_labels = timesteps - else: - class_labels = None - - # Predict the noise residual - model_pred = unet( - noisy_model_input, timesteps, encoder_hidden_states, class_labels=class_labels - ).sample - - # if model predicts variance, throw away the prediction. we will only train on the - # simplified training objective. This means that all schedulers using the fine tuned - # model must be configured to use one of the fixed variance variance types. - if model_pred.shape[1] == 6: - model_pred, _ = torch.chunk(model_pred, 2, dim=1) - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(model_input, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - if args.with_prior_preservation: - # Chunk the noise and model_pred into two parts and compute the loss on each part separately. - model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) - target, target_prior = torch.chunk(target, 2, dim=0) - - # Compute instance loss - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - # Compute prior loss - prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") - - # Add the prior loss to the instance loss. 
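In symbols, the quantity assembled on the next line is the DreamBooth prior-preservation objective, loss = MSE(model_pred, target) + prior_loss_weight * MSE(model_pred_prior, target_prior): the first term fits the instance images, while the weighted class ("prior") term regularizes against language drift and overfitting to the handful of instance images. (Per the branch above, target is the noise eps for epsilon-prediction checkpoints, or the velocity v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x0 for v-prediction ones.)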
- loss = loss + args.prior_loss_weight * prior_loss - else: - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = ( - itertools.chain(unet_lora_parameters, text_lora_parameters) - if args.train_text_encoder - else unet_lora_parameters - ) - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - if accelerator.is_main_process: - if global_step % args.checkpointing_steps == 0: - # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` - if args.checkpoints_total_limit is not None: - checkpoints = os.listdir(args.output_dir) - checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] - checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) - - # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints - if len(checkpoints) >= args.checkpoints_total_limit: - num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 - removing_checkpoints = checkpoints[0:num_to_remove] - - logger.info( - f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" - ) - logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") - - for removing_checkpoint in removing_checkpoints: - removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) - shutil.rmtree(removing_checkpoint) - - save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") - accelerator.save_state(save_path) - logger.info(f"Saved state to {save_path}") - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - if accelerator.is_main_process: - if args.validation_prompt is not None and epoch % args.validation_epochs == 0: - logger.info( - f"Running validation... \n Generating {args.num_validation_images} images with prompt:" - f" {args.validation_prompt}." - ) - # create pipeline - pipeline = DiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=None if args.pre_compute_text_embeddings else accelerator.unwrap_model(text_encoder), - revision=args.revision, - torch_dtype=weight_dtype, - ) - - # We train on the simplified learning objective. 
If we were previously predicting a variance, we need the scheduler to ignore it - scheduler_args = {} - - if "variance_type" in pipeline.scheduler.config: - variance_type = pipeline.scheduler.config.variance_type - - if variance_type in ["learned", "learned_range"]: - variance_type = "fixed_small" - - scheduler_args["variance_type"] = variance_type - - pipeline.scheduler = DPMSolverMultistepScheduler.from_config( - pipeline.scheduler.config, **scheduler_args - ) - - pipeline = pipeline.to(accelerator.device) - pipeline.set_progress_bar_config(disable=True) - - # run inference - generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None - if args.pre_compute_text_embeddings: - pipeline_args = { - "prompt_embeds": validation_prompt_encoder_hidden_states, - "negative_prompt_embeds": validation_prompt_negative_prompt_embeds, - } - else: - pipeline_args = {"prompt": args.validation_prompt} - - if args.validation_images is None: - images = [] - for _ in range(args.num_validation_images): - with torch.cuda.amp.autocast(): - image = pipeline(**pipeline_args, generator=generator).images[0] - images.append(image) - else: - images = [] - for image in args.validation_images: - image = Image.open(image) - with torch.cuda.amp.autocast(): - image = pipeline(**pipeline_args, image=image, generator=generator).images[0] - images.append(image) - - for tracker in accelerator.trackers: - if tracker.name == "tensorboard": - np_images = np.stack([np.asarray(img) for img in images]) - tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") - if tracker.name == "wandb": - tracker.log( - { - "validation": [ - wandb.Image(image, caption=f"{i}: {args.validation_prompt}") - for i, image in enumerate(images) - ] - } - ) - - del pipeline - torch.cuda.empty_cache() - - # Save the lora layers - accelerator.wait_for_everyone() - if accelerator.is_main_process: - unet = accelerator.unwrap_model(unet) - unet = unet.to(torch.float32) - unet_lora_layers = unet_attn_processors_state_dict(unet) - - if text_encoder is not None and args.train_text_encoder: - text_encoder = accelerator.unwrap_model(text_encoder) - text_encoder = text_encoder.to(torch.float32) - text_encoder_lora_layers = text_encoder_lora_state_dict(text_encoder) - else: - text_encoder_lora_layers = None - - LoraLoaderMixin.save_lora_weights( - save_directory=args.output_dir, - unet_lora_layers=unet_lora_layers, - text_encoder_lora_layers=text_encoder_lora_layers, - ) - - # Final inference - # Load previous pipeline - pipeline = DiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, revision=args.revision, torch_dtype=weight_dtype - ) - - # We train on the simplified learning objective. 
If we were previously predicting a variance, we need the scheduler to ignore it - scheduler_args = {} - - if "variance_type" in pipeline.scheduler.config: - variance_type = pipeline.scheduler.config.variance_type - - if variance_type in ["learned", "learned_range"]: - variance_type = "fixed_small" - - scheduler_args["variance_type"] = variance_type - - pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args) - - pipeline = pipeline.to(accelerator.device) - - # load attention processors - pipeline.load_lora_weights(args.output_dir, weight_name="pytorch_lora_weights.safetensors") - - # run inference - images = [] - if args.validation_prompt and args.num_validation_images > 0: - generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None - images = [ - pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] - for _ in range(args.num_validation_images) - ] - - for tracker in accelerator.trackers: - if tracker.name == "tensorboard": - np_images = np.stack([np.asarray(img) for img in images]) - tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") - if tracker.name == "wandb": - tracker.log( - { - "test": [ - wandb.Image(image, caption=f"{i}: {args.validation_prompt}") - for i, image in enumerate(images) - ] - } - ) - - if args.push_to_hub: - save_model_card( - repo_id, - images=images, - base_model=args.pretrained_model_name_or_path, - train_text_encoder=args.train_text_encoder, - prompt=args.instance_prompt, - repo_folder=args.output_dir, - pipeline=pipeline, - ) - upload_folder( - repo_id=repo_id, - folder_path=args.output_dir, - commit_message="End of training", - ignore_patterns=["step_*", "epoch_*"], - ) - - accelerator.end_training() - - -if __name__ == "__main__": - args = parse_args() - main(args) diff --git a/spaces/piecurus/Summarizer/README.md b/spaces/piecurus/Summarizer/README.md deleted file mode 100644 index a0407f921095e22692bf2f3d944d1e6e7fdf93ee..0000000000000000000000000000000000000000 --- a/spaces/piecurus/Summarizer/README.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Summarizer -emoji: 🐢 -colorFrom: pink -colorTo: gray -sdk: streamlit -app_file: app.py -pinned: false -license: mit ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
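Stepping back to the DreamBooth LoRA trainer that ends above: once it has written `pytorch_lora_weights.safetensors` to `--output_dir`, reloading the adapter for inference mirrors the script's own final-inference block. A minimal, hedged sketch — the model id, output path, and prompt are placeholders:

```python
# Hedged inference sketch for the LoRA weights the training script saves.
# Model id, output directory, and prompt are placeholders.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.load_lora_weights("lora-dreambooth-model")  # the script's default --output_dir
pipe.to("cuda")

image = pipe("a photo of sks dog in a bucket", num_inference_steps=25).images[0]
image.save("sks_dog.png")
```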
diff --git a/spaces/pierreguillou/bloomz-english/README.md b/spaces/pierreguillou/bloomz-english/README.md deleted file mode 100644 index 8c7138dd11cc11491bd5bca9c6c0308599166cf1..0000000000000000000000000000000000000000 --- a/spaces/pierreguillou/bloomz-english/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Bloomz (bigscience/bloomz-560m - english) -emoji: ⚡ -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pinkq/Newbing/tailwind.config.js b/spaces/pinkq/Newbing/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/pinkq/Newbing/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/pixiou/bingo/src/components/ui/tooltip.tsx b/spaces/pixiou/bingo/src/components/ui/tooltip.tsx deleted file mode 100644 index af1d48beb90dd5ae311796539843700871052cae..0000000000000000000000000000000000000000 --- a/spaces/pixiou/bingo/src/components/ui/tooltip.tsx +++ /dev/null @@ -1,30 +0,0 @@ -'use client' - -import * as React from 'react' -import * as TooltipPrimitive from '@radix-ui/react-tooltip' - -import { cn } from '@/lib/utils' - -const TooltipProvider = TooltipPrimitive.Provider - -const Tooltip = TooltipPrimitive.Root - -const TooltipTrigger = TooltipPrimitive.Trigger - -const TooltipContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, sideOffset = 4, ...props }, ref) => ( - -)) -TooltipContent.displayName = TooltipPrimitive.Content.displayName - -export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider } diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/lexer.py 
b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/lexer.py deleted file mode 100644 index eb2c1b46b6928363a1db20306c379b12668c5a47..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/lexer.py +++ /dev/null @@ -1,943 +0,0 @@ -""" - pygments.lexer - ~~~~~~~~~~~~~~ - - Base lexer classes. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re -import sys -import time - -from pip._vendor.pygments.filter import apply_filters, Filter -from pip._vendor.pygments.filters import get_filter_by_name -from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType -from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ - make_analysator, Future, guess_decode -from pip._vendor.pygments.regexopt import regex_opt - -__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', - 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this', - 'default', 'words', 'line_re'] - -line_re = re.compile('.*?\n') - -_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'), - (b'\xff\xfe\0\0', 'utf-32'), - (b'\0\0\xfe\xff', 'utf-32be'), - (b'\xff\xfe', 'utf-16'), - (b'\xfe\xff', 'utf-16be')] - -_default_analyse = staticmethod(lambda x: 0.0) - - -class LexerMeta(type): - """ - This metaclass automagically converts ``analyse_text`` methods into - static methods which always return float values. - """ - - def __new__(mcs, name, bases, d): - if 'analyse_text' in d: - d['analyse_text'] = make_analysator(d['analyse_text']) - return type.__new__(mcs, name, bases, d) - - -class Lexer(metaclass=LexerMeta): - """ - Lexer for a specific language. - - See also :doc:`lexerdevelopment`, a high-level guide to writing - lexers. - - Lexer classes have attributes used for choosing the most appropriate - lexer based on various criteria. - - .. autoattribute:: name - :no-value: - .. autoattribute:: aliases - :no-value: - .. autoattribute:: filenames - :no-value: - .. autoattribute:: alias_filenames - .. autoattribute:: mimetypes - :no-value: - .. autoattribute:: priority - - Lexers included in Pygments should have an additional attribute: - - .. autoattribute:: url - :no-value: - - You can pass options to the constructor. The basic options recognized - by all lexers and processed by the base `Lexer` class are: - - ``stripnl`` - Strip leading and trailing newlines from the input (default: True). - ``stripall`` - Strip all leading and trailing whitespace from the input - (default: False). - ``ensurenl`` - Make sure that the input ends with a newline (default: True). This - is required for some lexers that consume input linewise. - - .. versionadded:: 1.3 - - ``tabsize`` - If given and greater than 0, expand tabs in the input (default: 0). - ``encoding`` - If given, must be an encoding name. This encoding will be used to - convert the input string to Unicode, if it is not already a Unicode - string (default: ``'guess'``, which uses a simple UTF-8 / Locale / - Latin1 detection. Can also be ``'chardet'`` to use the chardet - library, if it is installed. - ``inencoding`` - Overrides the ``encoding`` if given. - """ - - #: Full name of the lexer, in human-readable form - name = None - - #: A list of short, unique identifiers that can be used to look - #: up the lexer from a list, e.g., using `get_lexer_by_name()`. 
- aliases = [] - - #: A list of `fnmatch` patterns that match filenames which contain - #: content for this lexer. The patterns in this list should be unique among - #: all lexers. - filenames = [] - - #: A list of `fnmatch` patterns that match filenames which may or may not - #: contain content for this lexer. This list is used by the - #: :func:`.guess_lexer_for_filename()` function, to determine which lexers - #: are then included in guessing the correct one. That means that - #: e.g. every lexer for HTML and a template language should include - #: ``\*.html`` in this list. - alias_filenames = [] - - #: A list of MIME types for content that can be lexed with this lexer. - mimetypes = [] - - #: Priority, should multiple lexers match and no content is provided - priority = 0 - - #: URL of the language specification/definition. Used in the Pygments - #: documentation. - url = None - - def __init__(self, **options): - """ - This constructor takes arbitrary options as keyword arguments. - Every subclass must first process its own options and then call - the `Lexer` constructor, since it processes the basic - options like `stripnl`. - - An example looks like this: - - .. sourcecode:: python - - def __init__(self, **options): - self.compress = options.get('compress', '') - Lexer.__init__(self, **options) - - As these options must all be specifiable as strings (due to the - command line usage), there are various utility functions - available to help with that, see `Utilities`_. - """ - self.options = options - self.stripnl = get_bool_opt(options, 'stripnl', True) - self.stripall = get_bool_opt(options, 'stripall', False) - self.ensurenl = get_bool_opt(options, 'ensurenl', True) - self.tabsize = get_int_opt(options, 'tabsize', 0) - self.encoding = options.get('encoding', 'guess') - self.encoding = options.get('inencoding') or self.encoding - self.filters = [] - for filter_ in get_list_opt(options, 'filters', ()): - self.add_filter(filter_) - - def __repr__(self): - if self.options: - return '' % (self.__class__.__name__, - self.options) - else: - return '' % self.__class__.__name__ - - def add_filter(self, filter_, **options): - """ - Add a new stream filter to this lexer. - """ - if not isinstance(filter_, Filter): - filter_ = get_filter_by_name(filter_, **options) - self.filters.append(filter_) - - def analyse_text(text): - """ - A static method which is called for lexer guessing. - - It should analyse the text and return a float in the range - from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer - will not be selected as the most probable one, if it returns - ``1.0``, it will be selected immediately. This is used by - `guess_lexer`. - - The `LexerMeta` metaclass automatically wraps this function so - that it works like a static method (no ``self`` or ``cls`` - parameter) and the return value is automatically converted to - `float`. If the return value is an object that is boolean `False` - it's the same as if the return values was ``0.0``. - """ - - def get_tokens(self, text, unfiltered=False): - """ - This method is the basic interface of a lexer. It is called by - the `highlight()` function. It must process the text and return an - iterable of ``(tokentype, value)`` pairs from `text`. - - Normally, you don't need to override this method. The default - implementation processes the options recognized by all lexers - (`stripnl`, `stripall` and so on), and then yields all tokens - from `get_tokens_unprocessed()`, with the ``index`` dropped. 
- - If `unfiltered` is set to `True`, the filtering mechanism is - bypassed even if filters are defined. - """ - if not isinstance(text, str): - if self.encoding == 'guess': - text, _ = guess_decode(text) - elif self.encoding == 'chardet': - try: - from pip._vendor import chardet - except ImportError as e: - raise ImportError('To enable chardet encoding guessing, ' - 'please install the chardet library ' - 'from http://chardet.feedparser.org/') from e - # check for BOM first - decoded = None - for bom, encoding in _encoding_map: - if text.startswith(bom): - decoded = text[len(bom):].decode(encoding, 'replace') - break - # no BOM found, so use chardet - if decoded is None: - enc = chardet.detect(text[:1024]) # Guess using first 1KB - decoded = text.decode(enc.get('encoding') or 'utf-8', - 'replace') - text = decoded - else: - text = text.decode(self.encoding) - if text.startswith('\ufeff'): - text = text[len('\ufeff'):] - else: - if text.startswith('\ufeff'): - text = text[len('\ufeff'):] - - # text now *is* a unicode string - text = text.replace('\r\n', '\n') - text = text.replace('\r', '\n') - if self.stripall: - text = text.strip() - elif self.stripnl: - text = text.strip('\n') - if self.tabsize > 0: - text = text.expandtabs(self.tabsize) - if self.ensurenl and not text.endswith('\n'): - text += '\n' - - def streamer(): - for _, t, v in self.get_tokens_unprocessed(text): - yield t, v - stream = streamer() - if not unfiltered: - stream = apply_filters(stream, self.filters, self) - return stream - - def get_tokens_unprocessed(self, text): - """ - This method should process the text and return an iterable of - ``(index, tokentype, value)`` tuples where ``index`` is the starting - position of the token within the input text. - - It must be overridden by subclasses. It is recommended to - implement it as a generator to maximize effectiveness. - """ - raise NotImplementedError - - -class DelegatingLexer(Lexer): - """ - This lexer takes two lexer as arguments. A root lexer and - a language lexer. First everything is scanned using the language - lexer, afterwards all ``Other`` tokens are lexed using the root - lexer. - - The lexers from the ``template`` lexer package use this base lexer. - """ - - def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): - self.root_lexer = _root_lexer(**options) - self.language_lexer = _language_lexer(**options) - self.needle = _needle - Lexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - buffered = '' - insertions = [] - lng_buffer = [] - for i, t, v in self.language_lexer.get_tokens_unprocessed(text): - if t is self.needle: - if lng_buffer: - insertions.append((len(buffered), lng_buffer)) - lng_buffer = [] - buffered += v - else: - lng_buffer.append((i, t, v)) - if lng_buffer: - insertions.append((len(buffered), lng_buffer)) - return do_insertions(insertions, - self.root_lexer.get_tokens_unprocessed(buffered)) - - -# ------------------------------------------------------------------------------ -# RegexLexer and ExtendedRegexLexer -# - - -class include(str): # pylint: disable=invalid-name - """ - Indicates that a state should include rules from another state. - """ - pass - - -class _inherit: - """ - Indicates the a state should inherit from its superclass. - """ - def __repr__(self): - return 'inherit' - -inherit = _inherit() # pylint: disable=invalid-name - - -class combined(tuple): # pylint: disable=invalid-name - """ - Indicates a state combined from multiple states. 
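-
-    For example, a rule can enter an anonymous state built from two
-    existing ones (an illustrative sketch, assuming ``stringescape`` and
-    ``string`` states exist in the lexer)::
-
-        (r'"', String, combined('stringescape', 'string'))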
- """ - - def __new__(cls, *args): - return tuple.__new__(cls, args) - - def __init__(self, *args): - # tuple.__init__ doesn't do anything - pass - - -class _PseudoMatch: - """ - A pseudo match object constructed from a string. - """ - - def __init__(self, start, text): - self._text = text - self._start = start - - def start(self, arg=None): - return self._start - - def end(self, arg=None): - return self._start + len(self._text) - - def group(self, arg=None): - if arg: - raise IndexError('No such group') - return self._text - - def groups(self): - return (self._text,) - - def groupdict(self): - return {} - - -def bygroups(*args): - """ - Callback that yields multiple actions for each group in the match. - """ - def callback(lexer, match, ctx=None): - for i, action in enumerate(args): - if action is None: - continue - elif type(action) is _TokenType: - data = match.group(i + 1) - if data: - yield match.start(i + 1), action, data - else: - data = match.group(i + 1) - if data is not None: - if ctx: - ctx.pos = match.start(i + 1) - for item in action(lexer, - _PseudoMatch(match.start(i + 1), data), ctx): - if item: - yield item - if ctx: - ctx.pos = match.end() - return callback - - -class _This: - """ - Special singleton used for indicating the caller class. - Used by ``using``. - """ - -this = _This() - - -def using(_other, **kwargs): - """ - Callback that processes the match with a different lexer. - - The keyword arguments are forwarded to the lexer, except `state` which - is handled separately. - - `state` specifies the state that the new lexer will start in, and can - be an enumerable such as ('root', 'inline', 'string') or a simple - string which is assumed to be on top of the root state. - - Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. - """ - gt_kwargs = {} - if 'state' in kwargs: - s = kwargs.pop('state') - if isinstance(s, (list, tuple)): - gt_kwargs['stack'] = s - else: - gt_kwargs['stack'] = ('root', s) - - if _other is this: - def callback(lexer, match, ctx=None): - # if keyword arguments are given the callback - # function has to create a new lexer instance - if kwargs: - # XXX: cache that somehow - kwargs.update(lexer.options) - lx = lexer.__class__(**kwargs) - else: - lx = lexer - s = match.start() - for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): - yield i + s, t, v - if ctx: - ctx.pos = match.end() - else: - def callback(lexer, match, ctx=None): - # XXX: cache that somehow - kwargs.update(lexer.options) - lx = _other(**kwargs) - - s = match.start() - for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): - yield i + s, t, v - if ctx: - ctx.pos = match.end() - return callback - - -class default: - """ - Indicates a state or state action (e.g. #pop) to apply. - For example default('#pop') is equivalent to ('', Token, '#pop') - Note that state tuples may be used as well. - - .. versionadded:: 2.0 - """ - def __init__(self, state): - self.state = state - - -class words(Future): - """ - Indicates a list of literal words that is transformed into an optimized - regex that matches any of the words. - - .. versionadded:: 2.0 - """ - def __init__(self, words, prefix='', suffix=''): - self.words = words - self.prefix = prefix - self.suffix = suffix - - def get(self): - return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix) - - -class RegexLexerMeta(LexerMeta): - """ - Metaclass for RegexLexer, creates the self._tokens attribute from - self.tokens on the first instantiation. 
- """ - - def _process_regex(cls, regex, rflags, state): - """Preprocess the regular expression component of a token definition.""" - if isinstance(regex, Future): - regex = regex.get() - return re.compile(regex, rflags).match - - def _process_token(cls, token): - """Preprocess the token component of a token definition.""" - assert type(token) is _TokenType or callable(token), \ - 'token type must be simple type or callable, not %r' % (token,) - return token - - def _process_new_state(cls, new_state, unprocessed, processed): - """Preprocess the state transition action of a token definition.""" - if isinstance(new_state, str): - # an existing state - if new_state == '#pop': - return -1 - elif new_state in unprocessed: - return (new_state,) - elif new_state == '#push': - return new_state - elif new_state[:5] == '#pop:': - return -int(new_state[5:]) - else: - assert False, 'unknown new state %r' % new_state - elif isinstance(new_state, combined): - # combine a new state from existing ones - tmp_state = '_tmp_%d' % cls._tmpname - cls._tmpname += 1 - itokens = [] - for istate in new_state: - assert istate != new_state, 'circular state ref %r' % istate - itokens.extend(cls._process_state(unprocessed, - processed, istate)) - processed[tmp_state] = itokens - return (tmp_state,) - elif isinstance(new_state, tuple): - # push more than one state - for istate in new_state: - assert (istate in unprocessed or - istate in ('#pop', '#push')), \ - 'unknown new state ' + istate - return new_state - else: - assert False, 'unknown new state def %r' % new_state - - def _process_state(cls, unprocessed, processed, state): - """Preprocess a single state definition.""" - assert type(state) is str, "wrong state name %r" % state - assert state[0] != '#', "invalid state name %r" % state - if state in processed: - return processed[state] - tokens = processed[state] = [] - rflags = cls.flags - for tdef in unprocessed[state]: - if isinstance(tdef, include): - # it's a state reference - assert tdef != state, "circular state reference %r" % state - tokens.extend(cls._process_state(unprocessed, processed, - str(tdef))) - continue - if isinstance(tdef, _inherit): - # should be processed already, but may not in the case of: - # 1. the state has no counterpart in any parent - # 2. the state includes more than one 'inherit' - continue - if isinstance(tdef, default): - new_state = cls._process_new_state(tdef.state, unprocessed, processed) - tokens.append((re.compile('').match, None, new_state)) - continue - - assert type(tdef) is tuple, "wrong rule def %r" % tdef - - try: - rex = cls._process_regex(tdef[0], rflags, state) - except Exception as err: - raise ValueError("uncompilable regex %r in state %r of %r: %s" % - (tdef[0], state, cls, err)) from err - - token = cls._process_token(tdef[1]) - - if len(tdef) == 2: - new_state = None - else: - new_state = cls._process_new_state(tdef[2], - unprocessed, processed) - - tokens.append((rex, token, new_state)) - return tokens - - def process_tokendef(cls, name, tokendefs=None): - """Preprocess a dictionary of token definitions.""" - processed = cls._all_tokens[name] = {} - tokendefs = tokendefs or cls.tokens[name] - for state in list(tokendefs): - cls._process_state(tokendefs, processed, state) - return processed - - def get_tokendefs(cls): - """ - Merge tokens from superclasses in MRO order, returning a single tokendef - dictionary. - - Any state that is not defined by a subclass will be inherited - automatically. 
States that *are* defined by subclasses will, by - default, override that state in the superclass. If a subclass wishes to - inherit definitions from a superclass, it can use the special value - "inherit", which will cause the superclass' state definition to be - included at that point in the state. - """ - tokens = {} - inheritable = {} - for c in cls.__mro__: - toks = c.__dict__.get('tokens', {}) - - for state, items in toks.items(): - curitems = tokens.get(state) - if curitems is None: - # N.b. because this is assigned by reference, sufficiently - # deep hierarchies are processed incrementally (e.g. for - # A(B), B(C), C(RegexLexer), B will be premodified so X(B) - # will not see any inherits in B). - tokens[state] = items - try: - inherit_ndx = items.index(inherit) - except ValueError: - continue - inheritable[state] = inherit_ndx - continue - - inherit_ndx = inheritable.pop(state, None) - if inherit_ndx is None: - continue - - # Replace the "inherit" value with the items - curitems[inherit_ndx:inherit_ndx+1] = items - try: - # N.b. this is the index in items (that is, the superclass - # copy), so offset required when storing below. - new_inh_ndx = items.index(inherit) - except ValueError: - pass - else: - inheritable[state] = inherit_ndx + new_inh_ndx - - return tokens - - def __call__(cls, *args, **kwds): - """Instantiate cls after preprocessing its token definitions.""" - if '_tokens' not in cls.__dict__: - cls._all_tokens = {} - cls._tmpname = 0 - if hasattr(cls, 'token_variants') and cls.token_variants: - # don't process yet - pass - else: - cls._tokens = cls.process_tokendef('', cls.get_tokendefs()) - - return type.__call__(cls, *args, **kwds) - - -class RegexLexer(Lexer, metaclass=RegexLexerMeta): - """ - Base for simple stateful regular expression-based lexers. - Simplifies the lexing process so that you need only - provide a list of states and regular expressions. - """ - - #: Flags for compiling the regular expressions. - #: Defaults to MULTILINE. - flags = re.MULTILINE - - #: At all time there is a stack of states. Initially, the stack contains - #: a single state 'root'. The top of the stack is called "the current state". - #: - #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` - #: - #: ``new_state`` can be omitted to signify no state transition. - #: If ``new_state`` is a string, it is pushed on the stack. This ensure - #: the new current state is ``new_state``. - #: If ``new_state`` is a tuple of strings, all of those strings are pushed - #: on the stack and the current state will be the last element of the list. - #: ``new_state`` can also be ``combined('state1', 'state2', ...)`` - #: to signify a new, anonymous state combined from the rules of two - #: or more existing ones. - #: Furthermore, it can be '#pop' to signify going back one step in - #: the state stack, or '#push' to push the current state on the stack - #: again. Note that if you push while in a combined state, the combined - #: state itself is pushed, and not only the state in which the rule is - #: defined. - #: - #: The tuple can also be replaced with ``include('state')``, in which - #: case the rules from the state named by the string are included in the - #: current one. - tokens = {} - - def get_tokens_unprocessed(self, text, stack=('root',)): - """ - Split ``text`` into (tokentype, text) pairs. 
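-
-        A minimal illustrative ``tokens`` table this method would walk
-        (a sketch, not drawn from any particular lexer)::
-
-            tokens = {
-                'root': [
-                    (r'\s+', Whitespace),
-                    (r'"', String, 'string'),   # push 'string'
-                    (r'[^"\s]+', Text),
-                ],
-                'string': [
-                    (r'[^"]+', String),
-                    (r'"', String, '#pop'),     # back to 'root'
-                ],
-            }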
- - ``stack`` is the initial stack (default: ``['root']``) - """ - pos = 0 - tokendefs = self._tokens - statestack = list(stack) - statetokens = tokendefs[statestack[-1]] - while 1: - for rexmatch, action, new_state in statetokens: - m = rexmatch(text, pos) - if m: - if action is not None: - if type(action) is _TokenType: - yield pos, action, m.group() - else: - yield from action(self, m) - pos = m.end() - if new_state is not None: - # state transition - if isinstance(new_state, tuple): - for state in new_state: - if state == '#pop': - if len(statestack) > 1: - statestack.pop() - elif state == '#push': - statestack.append(statestack[-1]) - else: - statestack.append(state) - elif isinstance(new_state, int): - # pop, but keep at least one state on the stack - # (random code leading to unexpected pops should - # not allow exceptions) - if abs(new_state) >= len(statestack): - del statestack[1:] - else: - del statestack[new_state:] - elif new_state == '#push': - statestack.append(statestack[-1]) - else: - assert False, "wrong state def: %r" % new_state - statetokens = tokendefs[statestack[-1]] - break - else: - # We are here only if all state tokens have been considered - # and there was not a match on any of them. - try: - if text[pos] == '\n': - # at EOL, reset state to "root" - statestack = ['root'] - statetokens = tokendefs['root'] - yield pos, Whitespace, '\n' - pos += 1 - continue - yield pos, Error, text[pos] - pos += 1 - except IndexError: - break - - -class LexerContext: - """ - A helper object that holds lexer position data. - """ - - def __init__(self, text, pos, stack=None, end=None): - self.text = text - self.pos = pos - self.end = end or len(text) # end=0 not supported ;-) - self.stack = stack or ['root'] - - def __repr__(self): - return 'LexerContext(%r, %r, %r)' % ( - self.text, self.pos, self.stack) - - -class ExtendedRegexLexer(RegexLexer): - """ - A RegexLexer that uses a context object to store its state. - """ - - def get_tokens_unprocessed(self, text=None, context=None): - """ - Split ``text`` into (tokentype, text) pairs. - If ``context`` is given, use this lexer context instead. - """ - tokendefs = self._tokens - if not context: - ctx = LexerContext(text, 0) - statetokens = tokendefs['root'] - else: - ctx = context - statetokens = tokendefs[ctx.stack[-1]] - text = ctx.text - while 1: - for rexmatch, action, new_state in statetokens: - m = rexmatch(text, ctx.pos, ctx.end) - if m: - if action is not None: - if type(action) is _TokenType: - yield ctx.pos, action, m.group() - ctx.pos = m.end() - else: - yield from action(self, m, ctx) - if not new_state: - # altered the state stack? - statetokens = tokendefs[ctx.stack[-1]] - # CAUTION: callback must set ctx.pos! 
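-                        # Unlike RegexLexer.get_tokens_unprocessed() above,
-                        # ctx.pos is not advanced automatically here: a
-                        # callback handed the context must move it itself,
-                        # e.g. ``ctx.pos = match.end()`` as the ``using()``
-                        # callbacks do, or lexing would stall on the same
-                        # input position.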
- if new_state is not None: - # state transition - if isinstance(new_state, tuple): - for state in new_state: - if state == '#pop': - if len(ctx.stack) > 1: - ctx.stack.pop() - elif state == '#push': - ctx.stack.append(ctx.stack[-1]) - else: - ctx.stack.append(state) - elif isinstance(new_state, int): - # see RegexLexer for why this check is made - if abs(new_state) >= len(ctx.stack): - del ctx.stack[1:] - else: - del ctx.stack[new_state:] - elif new_state == '#push': - ctx.stack.append(ctx.stack[-1]) - else: - assert False, "wrong state def: %r" % new_state - statetokens = tokendefs[ctx.stack[-1]] - break - else: - try: - if ctx.pos >= ctx.end: - break - if text[ctx.pos] == '\n': - # at EOL, reset state to "root" - ctx.stack = ['root'] - statetokens = tokendefs['root'] - yield ctx.pos, Text, '\n' - ctx.pos += 1 - continue - yield ctx.pos, Error, text[ctx.pos] - ctx.pos += 1 - except IndexError: - break - - -def do_insertions(insertions, tokens): - """ - Helper for lexers which must combine the results of several - sublexers. - - ``insertions`` is a list of ``(index, itokens)`` pairs. - Each ``itokens`` iterable should be inserted at position - ``index`` into the token stream given by the ``tokens`` - argument. - - The result is a combined token stream. - - TODO: clean up the code here. - """ - insertions = iter(insertions) - try: - index, itokens = next(insertions) - except StopIteration: - # no insertions - yield from tokens - return - - realpos = None - insleft = True - - # iterate over the token stream where we want to insert - # the tokens from the insertion list. - for i, t, v in tokens: - # first iteration. store the position of first item - if realpos is None: - realpos = i - oldi = 0 - while insleft and i + len(v) >= index: - tmpval = v[oldi:index - i] - if tmpval: - yield realpos, t, tmpval - realpos += len(tmpval) - for it_index, it_token, it_value in itokens: - yield realpos, it_token, it_value - realpos += len(it_value) - oldi = index - i - try: - index, itokens = next(insertions) - except StopIteration: - insleft = False - break # not strictly necessary - if oldi < len(v): - yield realpos, t, v[oldi:] - realpos += len(v) - oldi - - # leftover tokens - while insleft: - # no normal tokens, set realpos to zero - realpos = realpos or 0 - for p, t, v in itokens: - yield realpos, t, v - realpos += len(v) - try: - index, itokens = next(insertions) - except StopIteration: - insleft = False - break # not strictly necessary - - -class ProfilingRegexLexerMeta(RegexLexerMeta): - """Metaclass for ProfilingRegexLexer, collects regex timing info.""" - - def _process_regex(cls, regex, rflags, state): - if isinstance(regex, words): - rex = regex_opt(regex.words, prefix=regex.prefix, - suffix=regex.suffix) - else: - rex = regex - compiled = re.compile(rex, rflags) - - def match_func(text, pos, endpos=sys.maxsize): - info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0]) - t0 = time.time() - res = compiled.match(text, pos, endpos) - t1 = time.time() - info[0] += 1 - info[1] += t1 - t0 - return res - return match_func - - -class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta): - """Drop-in replacement for RegexLexer that does profiling of its regexes.""" - - _prof_data = [] - _prof_sort_index = 4 # defaults to time per call - - def get_tokens_unprocessed(self, text, stack=('root',)): - # this needs to be a stack, since using(this) will produce nested calls - self.__class__._prof_data.append({}) - yield from RegexLexer.get_tokens_unprocessed(self, text, stack) - rawdata = 
self.__class__._prof_data.pop() - data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65], - n, 1000 * t, 1000 * t / n) - for ((s, r), (n, t)) in rawdata.items()), - key=lambda x: x[self._prof_sort_index], - reverse=True) - sum_total = sum(x[3] for x in data) - - print() - print('Profiling result for %s lexing %d chars in %.3f ms' % - (self.__class__.__name__, len(text), sum_total)) - print('=' * 110) - print('%-20s %-64s ncalls tottime percall' % ('state', 'regex')) - print('-' * 110) - for d in data: - print('%-20s %-65s %5d %8.4f %8.4f' % d) - print('=' * 110) diff --git a/spaces/pknez/face-swap-docker/settings.py b/spaces/pknez/face-swap-docker/settings.py deleted file mode 100644 index 6d506d2bfd95b4a4898e441283ea4490edbff19e..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/settings.py +++ /dev/null @@ -1,56 +0,0 @@ -import yaml - -class Settings: - def __init__(self, config_file): - self.config_file = config_file - self.load() - - def default_get(_, data, name, default): - value = default - try: - value = data.get(name, default) - except: - pass - return value - - - def load(self): - try: - with open(self.config_file, 'r') as f: - data = yaml.load(f, Loader=yaml.FullLoader) - except: - data = None - - self.selected_theme = self.default_get(data, 'selected_theme', "Default") - self.server_name = self.default_get(data, 'server_name', "") - self.server_port = self.default_get(data, 'server_port', 0) - self.server_share = self.default_get(data, 'server_share', True) - self.output_image_format = self.default_get(data, 'output_image_format', 'png') - self.output_video_format = self.default_get(data, 'output_video_format', 'mp4') - self.output_video_codec = self.default_get(data, 'output_video_codec', 'libx264') - self.video_quality = self.default_get(data, 'video_quality', 14) - self.clear_output = self.default_get(data, 'clear_output', True) - self.max_threads = self.default_get(data, 'max_threads', 8) - self.provider = self.default_get(data, 'provider', 'cuda') - - - - def save(self): - data = { - 'selected_theme': self.selected_theme, - 'server_name': self.server_name, - 'server_port': self.server_port, - 'server_share': self.server_share, - 'output_image_format' : self.output_image_format, - 'output_video_format' : self.output_video_format, - 'output_video_codec' : self.output_video_codec, - 'video_quality' : self.video_quality, - 'clear_output' : self.clear_output, - 'max_threads' : self.max_threads, - 'provider' : self.provider - } - with open(self.config_file, 'w') as f: - yaml.dump(data, f) - - - diff --git a/spaces/prerna9811/Chord/portaudio/qa/loopback/src/paqa.c b/spaces/prerna9811/Chord/portaudio/qa/loopback/src/paqa.c deleted file mode 100644 index 5eb628336263deb58540cda7f8401f30a720f799..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/qa/loopback/src/paqa.c +++ /dev/null @@ -1,1601 +0,0 @@ - -/* - * PortAudio Portable Real-Time Audio Library - * Latest Version at: http://www.portaudio.com - * - * Copyright (c) 1999-2010 Phil Burk and Ross Bencina - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: 
- * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include -#include -#include -#include -#include - -#include "portaudio.h" - -#include "qa_tools.h" - -#include "paqa_tools.h" -#include "audio_analyzer.h" -#include "test_audio_analyzer.h" - -/** Accumulate counts for how many tests pass or fail. */ -int g_testsPassed = 0; -int g_testsFailed = 0; - -#define MAX_NUM_GENERATORS (8) -#define MAX_NUM_RECORDINGS (8) -#define MAX_BACKGROUND_NOISE_RMS (0.0004) -#define LOOPBACK_DETECTION_DURATION_SECONDS (0.8) -#define DEFAULT_FRAMES_PER_BUFFER (0) -#define PAQA_WAIT_STREAM_MSEC (100) -#define PAQA_TEST_DURATION (1.2) - -// Use two separate streams instead of one full duplex stream. -#define PAQA_FLAG_TWO_STREAMS (1<<0) -// Use bloching read/write for loopback. -#define PAQA_FLAG_USE_BLOCKING_IO (1<<1) - -const char * s_FlagOnNames[] = -{ - "Two Streams (Half Duplex)", - "Blocking Read/Write" -}; - -const char * s_FlagOffNames[] = -{ - "One Stream (Full Duplex)", - "Callback" -}; - - -/** Parameters that describe a single test run. */ -typedef struct TestParameters_s -{ - PaStreamParameters inputParameters; - PaStreamParameters outputParameters; - double sampleRate; - int samplesPerFrame; - int framesPerBuffer; - int maxFrames; - double baseFrequency; - double amplitude; - PaStreamFlags streamFlags; // paClipOff, etc - int flags; // PAQA_FLAG_TWO_STREAMS, PAQA_FLAG_USE_BLOCKING_IO -} TestParameters; - -typedef struct LoopbackContext_s -{ - // Generate a unique signal on each channel. - PaQaSineGenerator generators[MAX_NUM_GENERATORS]; - // Record each channel individually. - PaQaRecording recordings[MAX_NUM_RECORDINGS]; - - // Reported by the stream after it's opened - PaTime streamInfoInputLatency; - PaTime streamInfoOutputLatency; - - // Measured at runtime. - volatile int callbackCount; // incremented for each callback - volatile int inputBufferCount; // incremented if input buffer not NULL - int inputUnderflowCount; - int inputOverflowCount; - - volatile int outputBufferCount; // incremented if output buffer not NULL - int outputOverflowCount; - int outputUnderflowCount; - - // Measure whether input or output is lagging behind. 
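-    /* Note: the callback samples (inputBufferCount - outputBufferCount)
-     * on every invocation and the two fields below keep the extremes;
-     * a steadily growing positive delta means the output side is
-     * falling behind the input side. */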
- volatile int minInputOutputDelta; - volatile int maxInputOutputDelta; - - int minFramesPerBuffer; - int maxFramesPerBuffer; - int primingCount; - TestParameters *test; - volatile int done; -} LoopbackContext; - -typedef struct UserOptions_s -{ - int sampleRate; - int framesPerBuffer; - int inputLatency; - int outputLatency; - int saveBadWaves; - int verbose; - int waveFileCount; - const char *waveFilePath; - PaDeviceIndex inputDevice; - PaDeviceIndex outputDevice; -} UserOptions; - -#define BIG_BUFFER_SIZE (sizeof(float) * 2 * 2 * 1024) -static unsigned char g_ReadWriteBuffer[BIG_BUFFER_SIZE]; - -#define MAX_CONVERSION_SAMPLES (2 * 32 * 1024) -#define CONVERSION_BUFFER_SIZE (sizeof(float) * 2 * MAX_CONVERSION_SAMPLES) -static unsigned char g_ConversionBuffer[CONVERSION_BUFFER_SIZE]; - -/*******************************************************************/ -static int RecordAndPlaySinesCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - int i; - LoopbackContext *loopbackContext = (LoopbackContext *) userData; - - - loopbackContext->callbackCount += 1; - if( statusFlags & paInputUnderflow ) loopbackContext->inputUnderflowCount += 1; - if( statusFlags & paInputOverflow ) loopbackContext->inputOverflowCount += 1; - if( statusFlags & paOutputUnderflow ) loopbackContext->outputUnderflowCount += 1; - if( statusFlags & paOutputOverflow ) loopbackContext->outputOverflowCount += 1; - if( statusFlags & paPrimingOutput ) loopbackContext->primingCount += 1; - if( framesPerBuffer > loopbackContext->maxFramesPerBuffer ) - { - loopbackContext->maxFramesPerBuffer = framesPerBuffer; - } - if( framesPerBuffer < loopbackContext->minFramesPerBuffer ) - { - loopbackContext->minFramesPerBuffer = framesPerBuffer; - } - - /* This may get called with NULL inputBuffer during initial setup. - * We may also use the same callback with output only streams. - */ - if( inputBuffer != NULL) - { - int channelsPerFrame = loopbackContext->test->inputParameters.channelCount; - float *in = (float *)inputBuffer; - PaSampleFormat inFormat = loopbackContext->test->inputParameters.sampleFormat; - - loopbackContext->inputBufferCount += 1; - - if( inFormat != paFloat32 ) - { - int samplesToConvert = framesPerBuffer * channelsPerFrame; - in = (float *) g_ConversionBuffer; - if( samplesToConvert > MAX_CONVERSION_SAMPLES ) - { - // Hack to prevent buffer overflow. - // @todo Loop with small buffer instead of failing. - printf("Format conversion buffer too small!\n"); - return paComplete; - } - PaQa_ConvertToFloat( inputBuffer, samplesToConvert, inFormat, (float *) g_ConversionBuffer ); - } - - // Read each channel from the buffer. - for( i=0; idone |= PaQa_WriteRecording( &loopbackContext->recordings[i], - in + i, - framesPerBuffer, - channelsPerFrame ); - } - } - - if( outputBuffer != NULL ) - { - int channelsPerFrame = loopbackContext->test->outputParameters.channelCount; - float *out = (float *)outputBuffer; - PaSampleFormat outFormat = loopbackContext->test->outputParameters.sampleFormat; - - loopbackContext->outputBufferCount += 1; - - if( outFormat != paFloat32 ) - { - // If we need to convert then mix to the g_ConversionBuffer and then convert into the PA outputBuffer. 
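-            // Note: synthesis always happens in float; for non-float
-            // formats the mix is staged in g_ConversionBuffer and turned
-            // into the stream's native format by PaQa_ConvertFromFloat()
-            // just before the callback returns.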
- out = (float *) g_ConversionBuffer; - } - - PaQa_EraseBuffer( out, framesPerBuffer, channelsPerFrame ); - for( i=0; igenerators[i], - out + i, - framesPerBuffer, - channelsPerFrame ); - } - - if( outFormat != paFloat32 ) - { - int samplesToConvert = framesPerBuffer * channelsPerFrame; - if( samplesToConvert > MAX_CONVERSION_SAMPLES ) - { - printf("Format conversion buffer too small!\n"); - return paComplete; - } - PaQa_ConvertFromFloat( out, framesPerBuffer * channelsPerFrame, outFormat, outputBuffer ); - } - - } - - // Measure whether the input or output are lagging behind. - // Don't measure lag at end. - if( !loopbackContext->done ) - { - int inputOutputDelta = loopbackContext->inputBufferCount - loopbackContext->outputBufferCount; - if( loopbackContext->maxInputOutputDelta < inputOutputDelta ) - { - loopbackContext->maxInputOutputDelta = inputOutputDelta; - } - if( loopbackContext->minInputOutputDelta > inputOutputDelta ) - { - loopbackContext->minInputOutputDelta = inputOutputDelta; - } - } - - return loopbackContext->done ? paComplete : paContinue; -} - -static void CopyStreamInfoToLoopbackContext( LoopbackContext *loopbackContext, PaStream *inputStream, PaStream *outputStream ) -{ - const PaStreamInfo *inputStreamInfo = Pa_GetStreamInfo( inputStream ); - const PaStreamInfo *outputStreamInfo = Pa_GetStreamInfo( outputStream ); - - loopbackContext->streamInfoInputLatency = inputStreamInfo ? inputStreamInfo->inputLatency : -1; - loopbackContext->streamInfoOutputLatency = outputStreamInfo ? outputStreamInfo->outputLatency : -1; -} - -/*******************************************************************/ -/** - * Open a full duplex audio stream. - * Generate sine waves on the output channels and record the input channels. - * Then close the stream. - * @return 0 if OK or negative error. - */ -int PaQa_RunLoopbackFullDuplex( LoopbackContext *loopbackContext ) -{ - PaStream *stream = NULL; - PaError err = 0; - TestParameters *test = loopbackContext->test; - loopbackContext->done = 0; - // Use one full duplex stream. - err = Pa_OpenStream( - &stream, - &test->inputParameters, - &test->outputParameters, - test->sampleRate, - test->framesPerBuffer, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - RecordAndPlaySinesCallback, - loopbackContext ); - if( err != paNoError ) goto error; - - CopyStreamInfoToLoopbackContext( loopbackContext, stream, stream ); - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error; - - // Wait for stream to finish. - while( loopbackContext->done == 0 ) - { - Pa_Sleep(PAQA_WAIT_STREAM_MSEC); - } - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error; - - return 0; - -error: - return err; -} - -/*******************************************************************/ -/** - * Open two audio streams, one for input and one for output. - * Generate sine waves on the output channels and record the input channels. - * Then close the stream. - * @return 0 if OK or paTimedOut. - */ - -int PaQa_WaitForStream( LoopbackContext *loopbackContext ) -{ - int timeoutMSec = 1000 * PAQA_TEST_DURATION * 2; - - // Wait for stream to finish or timeout. 
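-    // With PAQA_TEST_DURATION = 1.2 and PAQA_WAIT_STREAM_MSEC = 100 this
-    // polls the done flag every 100 msec and gives up after 2400 msec,
-    // i.e. twice the nominal test length.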
- while( (loopbackContext->done == 0) && (timeoutMSec > 0) ) - { - Pa_Sleep(PAQA_WAIT_STREAM_MSEC); - timeoutMSec -= PAQA_WAIT_STREAM_MSEC; - } - - if( loopbackContext->done == 0 ) - { - printf("ERROR - stream completion timed out!"); - return paTimedOut; - } - return 0; -} - -/*******************************************************************/ -/** - * Open two audio streams, one for input and one for output. - * Generate sine waves on the output channels and record the input channels. - * Then close the stream. - * @return 0 if OK or negative error. - */ -int PaQa_RunLoopbackHalfDuplex( LoopbackContext *loopbackContext ) -{ - PaStream *inStream = NULL; - PaStream *outStream = NULL; - PaError err = 0; - int timedOut = 0; - TestParameters *test = loopbackContext->test; - loopbackContext->done = 0; - - // Use two half duplex streams. - err = Pa_OpenStream( - &inStream, - &test->inputParameters, - NULL, - test->sampleRate, - test->framesPerBuffer, - test->streamFlags, - RecordAndPlaySinesCallback, - loopbackContext ); - if( err != paNoError ) goto error; - err = Pa_OpenStream( - &outStream, - NULL, - &test->outputParameters, - test->sampleRate, - test->framesPerBuffer, - test->streamFlags, - RecordAndPlaySinesCallback, - loopbackContext ); - if( err != paNoError ) goto error; - - CopyStreamInfoToLoopbackContext( loopbackContext, inStream, outStream ); - - err = Pa_StartStream( inStream ); - if( err != paNoError ) goto error; - - // Start output later so we catch the beginning of the waveform. - err = Pa_StartStream( outStream ); - if( err != paNoError ) goto error; - - timedOut = PaQa_WaitForStream( loopbackContext ); - - err = Pa_StopStream( inStream ); - if( err != paNoError ) goto error; - - err = Pa_StopStream( outStream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( inStream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( outStream ); - if( err != paNoError ) goto error; - - return timedOut; - -error: - return err; -} - - -/*******************************************************************/ -/** - * Open one audio streams, just for input. - * Record background level. - * Then close the stream. - * @return 0 if OK or negative error. - */ -int PaQa_RunInputOnly( LoopbackContext *loopbackContext ) -{ - PaStream *inStream = NULL; - PaError err = 0; - int timedOut = 0; - TestParameters *test = loopbackContext->test; - loopbackContext->done = 0; - - // Just open an input stream. - err = Pa_OpenStream( - &inStream, - &test->inputParameters, - NULL, - test->sampleRate, - test->framesPerBuffer, - paClipOff, /* We won't output out of range samples so don't bother clipping them. 
*/ - RecordAndPlaySinesCallback, - loopbackContext ); - if( err != paNoError ) goto error; - - err = Pa_StartStream( inStream ); - if( err != paNoError ) goto error; - - timedOut = PaQa_WaitForStream( loopbackContext ); - - err = Pa_StopStream( inStream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( inStream ); - if( err != paNoError ) goto error; - - return timedOut; - -error: - return err; -} - -/*******************************************************************/ -static int RecordAndPlayBlockingIO( PaStream *inStream, - PaStream *outStream, - LoopbackContext *loopbackContext - ) -{ - int i; - float *in = (float *)g_ReadWriteBuffer; - float *out = (float *)g_ReadWriteBuffer; - PaError err; - int done = 0; - long available; - const long maxPerBuffer = 64; - TestParameters *test = loopbackContext->test; - long framesPerBuffer = test->framesPerBuffer; - if( framesPerBuffer <= 0 ) - { - framesPerBuffer = maxPerBuffer; // bigger values might run past end of recording - } - - // Read in audio. - err = Pa_ReadStream( inStream, in, framesPerBuffer ); - // Ignore an overflow on the first read. - //if( !((loopbackContext->callbackCount == 0) && (err == paInputOverflowed)) ) - if( err != paInputOverflowed ) - { - QA_ASSERT_EQUALS( "Pa_ReadStream failed", paNoError, err ); - } - else - { - loopbackContext->inputOverflowCount += 1; - } - - - // Save in a recording. - for( i=0; itest->inputParameters.channelCount; i++ ) - { - done |= PaQa_WriteRecording( &loopbackContext->recordings[i], - in + i, - framesPerBuffer, - loopbackContext->test->inputParameters.channelCount ); - } - - // Synthesize audio. - available = Pa_GetStreamWriteAvailable( outStream ); - if( available > (2*framesPerBuffer) ) available = (2*framesPerBuffer); - PaQa_EraseBuffer( out, available, loopbackContext->test->outputParameters.channelCount ); - for( i=0; itest->outputParameters.channelCount; i++ ) - { - PaQa_MixSine( &loopbackContext->generators[i], - out + i, - available, - loopbackContext->test->outputParameters.channelCount ); - } - - // Write out audio. - err = Pa_WriteStream( outStream, out, available ); - // Ignore an underflow on the first write. - //if( !((loopbackContext->callbackCount == 0) && (err == paOutputUnderflowed)) ) - if( err != paOutputUnderflowed ) - { - QA_ASSERT_EQUALS( "Pa_WriteStream failed", paNoError, err ); - } - else - { - loopbackContext->outputUnderflowCount += 1; - } - - - loopbackContext->callbackCount += 1; - - return done; -error: - return err; -} - - -/*******************************************************************/ -/** - * Open two audio streams with non-blocking IO. - * Generate sine waves on the output channels and record the input channels. - * Then close the stream. - * @return 0 if OK or negative error. - */ -int PaQa_RunLoopbackHalfDuplexBlockingIO( LoopbackContext *loopbackContext ) -{ - PaStream *inStream = NULL; - PaStream *outStream = NULL; - PaError err = 0; - TestParameters *test = loopbackContext->test; - - // Use two half duplex streams. 
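-    // A NULL stream callback (passed below) requests PortAudio's blocking
-    // read/write mode, so these streams are driven manually with
-    // Pa_ReadStream()/Pa_WriteStream() via RecordAndPlayBlockingIO().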
- err = Pa_OpenStream( - &inStream, - &test->inputParameters, - NULL, - test->sampleRate, - test->framesPerBuffer, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - NULL, // causes non-blocking IO - NULL ); - if( err != paNoError ) goto error1; - err = Pa_OpenStream( - &outStream, - NULL, - &test->outputParameters, - test->sampleRate, - test->framesPerBuffer, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - NULL, // causes non-blocking IO - NULL ); - if( err != paNoError ) goto error2; - - CopyStreamInfoToLoopbackContext( loopbackContext, inStream, outStream ); - - err = Pa_StartStream( outStream ); - if( err != paNoError ) goto error3; - - err = Pa_StartStream( inStream ); - if( err != paNoError ) goto error3; - - while( err == 0 ) - { - err = RecordAndPlayBlockingIO( inStream, outStream, loopbackContext ); - if( err < 0 ) goto error3; - } - - err = Pa_StopStream( inStream ); - if( err != paNoError ) goto error3; - - err = Pa_StopStream( outStream ); - if( err != paNoError ) goto error3; - - err = Pa_CloseStream( outStream ); - if( err != paNoError ) goto error2; - - err = Pa_CloseStream( inStream ); - if( err != paNoError ) goto error1; - - - return 0; - -error3: - Pa_CloseStream( outStream ); -error2: - Pa_CloseStream( inStream ); -error1: - return err; -} - - -/*******************************************************************/ -/** - * Open one audio stream with non-blocking IO. - * Generate sine waves on the output channels and record the input channels. - * Then close the stream. - * @return 0 if OK or negative error. - */ -int PaQa_RunLoopbackFullDuplexBlockingIO( LoopbackContext *loopbackContext ) -{ - PaStream *stream = NULL; - PaError err = 0; - TestParameters *test = loopbackContext->test; - - // Use one full duplex stream. - err = Pa_OpenStream( - &stream, - &test->inputParameters, - &test->outputParameters, - test->sampleRate, - test->framesPerBuffer, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - NULL, // causes non-blocking IO - NULL ); - if( err != paNoError ) goto error1; - - CopyStreamInfoToLoopbackContext( loopbackContext, stream, stream ); - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error2; - - while( err == 0 ) - { - err = RecordAndPlayBlockingIO( stream, stream, loopbackContext ); - if( err < 0 ) goto error2; - } - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error2; - - - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error1; - - - return 0; - -error2: - Pa_CloseStream( stream ); -error1: - return err; -} - - -/*******************************************************************/ -/** - * Run some kind of loopback test. - * @return 0 if OK or negative error. 
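- *
- * Dispatches on test->flags: PAQA_FLAG_TWO_STREAMS selects the
- * half-duplex (two stream) variants and PAQA_FLAG_USE_BLOCKING_IO the
- * blocking read/write variants, i.e. one of the four PaQa_RunLoopback*
- * implementations above.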
- */ -int PaQa_RunLoopback( LoopbackContext *loopbackContext ) -{ - PaError err = 0; - TestParameters *test = loopbackContext->test; - - - if( test->flags & PAQA_FLAG_TWO_STREAMS ) - { - if( test->flags & PAQA_FLAG_USE_BLOCKING_IO ) - { - err = PaQa_RunLoopbackHalfDuplexBlockingIO( loopbackContext ); - } - else - { - err = PaQa_RunLoopbackHalfDuplex( loopbackContext ); - } - } - else - { - if( test->flags & PAQA_FLAG_USE_BLOCKING_IO ) - { - err = PaQa_RunLoopbackFullDuplexBlockingIO( loopbackContext ); - } - else - { - err = PaQa_RunLoopbackFullDuplex( loopbackContext ); - } - } - - if( err != paNoError ) - { - printf("PortAudio error = %s\n", Pa_GetErrorText( err ) ); - } - return err; -} - -/*******************************************************************/ -static int PaQa_SaveTestResultToWaveFile( UserOptions *userOptions, PaQaRecording *recording ) -{ - if( userOptions->saveBadWaves ) - { - char filename[256]; -#ifdef WIN32 - _snprintf( filename, sizeof(filename), "%s\\paloopback_%d.wav", userOptions->waveFilePath, userOptions->waveFileCount++ ); -#else - snprintf( filename, sizeof(filename), "%s/paloopback_%d.wav", userOptions->waveFilePath, userOptions->waveFileCount++ ); -#endif - printf( "\"%s\", ", filename ); - return PaQa_SaveRecordingToWaveFile( recording, filename ); - } - return 0; -} - -/*******************************************************************/ -static int PaQa_SetupLoopbackContext( LoopbackContext *loopbackContextPtr, TestParameters *testParams ) -{ - int i; - // Setup loopback context. - memset( loopbackContextPtr, 0, sizeof(LoopbackContext) ); - loopbackContextPtr->test = testParams; - for( i=0; isamplesPerFrame; i++ ) - { - int err = PaQa_InitializeRecording( &loopbackContextPtr->recordings[i], testParams->maxFrames, testParams->sampleRate ); - QA_ASSERT_EQUALS( "PaQa_InitializeRecording failed", paNoError, err ); - } - for( i=0; isamplesPerFrame; i++ ) - { - PaQa_SetupSineGenerator( &loopbackContextPtr->generators[i], PaQa_GetNthFrequency( testParams->baseFrequency, i ), - testParams->amplitude, testParams->sampleRate ); - } - loopbackContextPtr->minFramesPerBuffer = 0x0FFFFFFF; - return 0; -error: - return -1; -} - -/*******************************************************************/ -static void PaQa_TeardownLoopbackContext( LoopbackContext *loopbackContextPtr ) -{ - int i; - if( loopbackContextPtr->test != NULL ) - { - for( i=0; itest->samplesPerFrame; i++ ) - { - PaQa_TerminateRecording( &loopbackContextPtr->recordings[i] ); - } - } -} - -/*******************************************************************/ -static void PaQa_PrintShortErrorReport( PaQaAnalysisResult *analysisResultPtr, int channel ) -{ - printf("channel %d ", channel); - if( analysisResultPtr->popPosition > 0 ) - { - printf("POP %0.3f at %d, ", (double)analysisResultPtr->popAmplitude, (int)analysisResultPtr->popPosition ); - } - else - { - if( analysisResultPtr->addedFramesPosition > 0 ) - { - printf("ADD %d at %d ", (int)analysisResultPtr->numAddedFrames, (int)analysisResultPtr->addedFramesPosition ); - } - - if( analysisResultPtr->droppedFramesPosition > 0 ) - { - printf("DROP %d at %d ", (int)analysisResultPtr->numDroppedFrames, (int)analysisResultPtr->droppedFramesPosition ); - } - } -} - -/*******************************************************************/ -static void PaQa_PrintFullErrorReport( PaQaAnalysisResult *analysisResultPtr, int channel ) -{ - printf("\n=== Loopback Analysis ===================\n"); - printf(" channel: %d\n", channel ); - printf(" latency: %10.3f\n", 
analysisResultPtr->latency ); - printf(" amplitudeRatio: %10.3f\n", (double)analysisResultPtr->amplitudeRatio ); - printf(" popPosition: %10.3f\n", (double)analysisResultPtr->popPosition ); - printf(" popAmplitude: %10.3f\n", (double)analysisResultPtr->popAmplitude ); - printf(" num added frames: %10.3f\n", analysisResultPtr->numAddedFrames ); - printf(" added frames at: %10.3f\n", analysisResultPtr->addedFramesPosition ); - printf(" num dropped frames: %10.3f\n", analysisResultPtr->numDroppedFrames ); - printf(" dropped frames at: %10.3f\n", analysisResultPtr->droppedFramesPosition ); -} - -/*******************************************************************/ -/** - * Test loopback connection using the given parameters. - * @return number of channels with glitches, or negative error. - */ -static int PaQa_SingleLoopBackTest( UserOptions *userOptions, TestParameters *testParams ) -{ - int i; - LoopbackContext loopbackContext; - PaError err = paNoError; - PaQaTestTone testTone; - PaQaAnalysisResult analysisResult; - int numBadChannels = 0; - - printf("| %5d | %6d | ", ((int)(testParams->sampleRate+0.5)), testParams->framesPerBuffer ); - fflush(stdout); - - testTone.samplesPerFrame = testParams->samplesPerFrame; - testTone.sampleRate = testParams->sampleRate; - testTone.amplitude = testParams->amplitude; - testTone.startDelay = 0; - - err = PaQa_SetupLoopbackContext( &loopbackContext, testParams ); - if( err ) return err; - - err = PaQa_RunLoopback( &loopbackContext ); - QA_ASSERT_TRUE("loopback did not run", (loopbackContext.callbackCount > 1) ); - - printf( "%7.2f %7.2f %7.2f | ", - loopbackContext.streamInfoInputLatency * 1000.0, - loopbackContext.streamInfoOutputLatency * 1000.0, - (loopbackContext.streamInfoInputLatency + loopbackContext.streamInfoOutputLatency) * 1000.0 - ); - - printf( "%4d/%4d/%4d, %4d/%4d/%4d | ", - loopbackContext.inputOverflowCount, - loopbackContext.inputUnderflowCount, - loopbackContext.inputBufferCount, - loopbackContext.outputOverflowCount, - loopbackContext.outputUnderflowCount, - loopbackContext.outputBufferCount - ); - - // Analyse recording to detect glitches. - for( i=0; isamplesPerFrame; i++ ) - { - double freq = PaQa_GetNthFrequency( testParams->baseFrequency, i ); - testTone.frequency = freq; - - PaQa_AnalyseRecording( &loopbackContext.recordings[i], &testTone, &analysisResult ); - - if( i==0 ) - { - double latencyMSec; - - printf( "%4d-%4d | ", - loopbackContext.minFramesPerBuffer, - loopbackContext.maxFramesPerBuffer - ); - - latencyMSec = 1000.0 * analysisResult.latency / testParams->sampleRate; - printf("%7.2f | ", latencyMSec ); - - } - - if( analysisResult.valid ) - { - int badChannel = ( (analysisResult.popPosition > 0) - || (analysisResult.addedFramesPosition > 0) - || (analysisResult.droppedFramesPosition > 0) ); - - if( badChannel ) - { - if( userOptions->verbose ) - { - PaQa_PrintFullErrorReport( &analysisResult, i ); - } - else - { - PaQa_PrintShortErrorReport( &analysisResult, i ); - } - PaQa_SaveTestResultToWaveFile( userOptions, &loopbackContext.recordings[i] ); - } - numBadChannels += badChannel; - } - else - { - printf( "[%d] No or low signal, ampRatio = %f", i, analysisResult.amplitudeRatio ); - numBadChannels += 1; - } - - } - if( numBadChannels == 0 ) - { - printf( "OK" ); - } - - // Print the # errors so far to make it easier to see where the error occurred. 
- printf( " - #errs = %d\n", g_testsFailed ); - - PaQa_TeardownLoopbackContext( &loopbackContext ); - if( numBadChannels > 0 ) - { - g_testsFailed += 1; - } - return numBadChannels; - -error: - PaQa_TeardownLoopbackContext( &loopbackContext ); - printf( "\n" ); - g_testsFailed += 1; - return err; -} - -/*******************************************************************/ -static void PaQa_SetDefaultTestParameters( TestParameters *testParamsPtr, PaDeviceIndex inputDevice, PaDeviceIndex outputDevice ) -{ - memset( testParamsPtr, 0, sizeof(TestParameters) ); - - testParamsPtr->samplesPerFrame = 2; - testParamsPtr->amplitude = 0.5; - testParamsPtr->sampleRate = 44100; - testParamsPtr->maxFrames = (int) (PAQA_TEST_DURATION * testParamsPtr->sampleRate); - testParamsPtr->framesPerBuffer = DEFAULT_FRAMES_PER_BUFFER; - testParamsPtr->baseFrequency = 200.0; - testParamsPtr->flags = PAQA_FLAG_TWO_STREAMS; - testParamsPtr->streamFlags = paClipOff; /* we won't output out of range samples so don't bother clipping them */ - - testParamsPtr->inputParameters.device = inputDevice; - testParamsPtr->inputParameters.sampleFormat = paFloat32; - testParamsPtr->inputParameters.channelCount = testParamsPtr->samplesPerFrame; - testParamsPtr->inputParameters.suggestedLatency = Pa_GetDeviceInfo( inputDevice )->defaultLowInputLatency; - //testParamsPtr->inputParameters.suggestedLatency = Pa_GetDeviceInfo( inputDevice )->defaultHighInputLatency; - - testParamsPtr->outputParameters.device = outputDevice; - testParamsPtr->outputParameters.sampleFormat = paFloat32; - testParamsPtr->outputParameters.channelCount = testParamsPtr->samplesPerFrame; - testParamsPtr->outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputDevice )->defaultLowOutputLatency; - //testParamsPtr->outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputDevice )->defaultHighOutputLatency; -} - -/*******************************************************************/ -static void PaQa_OverrideTestParameters( TestParameters *testParamsPtr, UserOptions *userOptions ) -{ - // Check to see if a specific value was requested. - if( userOptions->sampleRate >= 0 ) - { - testParamsPtr->sampleRate = userOptions->sampleRate; - testParamsPtr->maxFrames = (int) (PAQA_TEST_DURATION * testParamsPtr->sampleRate); - } - if( userOptions->framesPerBuffer >= 0 ) - { - testParamsPtr->framesPerBuffer = userOptions->framesPerBuffer; - } - if( userOptions->inputLatency >= 0 ) - { - testParamsPtr->inputParameters.suggestedLatency = userOptions->inputLatency * 0.001; - } - if( userOptions->outputLatency >= 0 ) - { - testParamsPtr->outputParameters.suggestedLatency = userOptions->outputLatency * 0.001; - } - printf( " Running with suggested latency (msec): input = %5.2f, out = %5.2f\n", - (testParamsPtr->inputParameters.suggestedLatency * 1000.0), - (testParamsPtr->outputParameters.suggestedLatency * 1000.0) ); -} - -/*******************************************************************/ -/** - * Run a series of tests on this loopback connection. - * @return number of bad channel results - */ -static int PaQa_AnalyzeLoopbackConnection( UserOptions *userOptions, PaDeviceIndex inputDevice, PaDeviceIndex outputDevice ) -{ - int iFlags; - int iRate; - int iSize; - int iFormat; - int savedValue; - TestParameters testParams; - const PaDeviceInfo *inputDeviceInfo = Pa_GetDeviceInfo( inputDevice ); - const PaDeviceInfo *outputDeviceInfo = Pa_GetDeviceInfo( outputDevice ); - int totalBadChannels = 0; - - // test half duplex first because it is more likely to work. 
- int flagSettings[] = { PAQA_FLAG_TWO_STREAMS, 0 }; - int numFlagSettings = (sizeof(flagSettings)/sizeof(int)); - - double sampleRates[] = { 8000.0, 11025.0, 16000.0, 22050.0, 32000.0, 44100.0, 48000.0, 96000.0 }; - int numRates = (sizeof(sampleRates)/sizeof(double)); - - // framesPerBuffer==0 means PA decides on the buffer size. - int framesPerBuffers[] = { 0, 16, 32, 40, 64, 100, 128, 256, 512, 1024 }; - int numBufferSizes = (sizeof(framesPerBuffers)/sizeof(int)); - - PaSampleFormat sampleFormats[] = { paFloat32, paUInt8, paInt8, paInt16, paInt32 }; - const char *sampleFormatNames[] = { "paFloat32", "paUInt8", "paInt8", "paInt16", "paInt32" }; - int numSampleFormats = (sizeof(sampleFormats)/sizeof(PaSampleFormat)); - - printf( "=============== Analysing Loopback %d to %d =====================\n", outputDevice, inputDevice ); - printf( " Devices: %s => %s\n", outputDeviceInfo->name, inputDeviceInfo->name); - - PaQa_SetDefaultTestParameters( &testParams, inputDevice, outputDevice ); - - PaQa_OverrideTestParameters( &testParams, userOptions ); - - // Loop though combinations of audio parameters. - for( iFlags=0; iFlagssampleRate < 0 ) - { - savedValue = testParams.sampleRate; - for( iRate=0; iRateframesPerBuffer < 0 ) - { - savedValue = testParams.framesPerBuffer; - for( iSize=0; iSizetest; - - // Start in the middle assuming past latency. - int startFrame = testParamsPtr->maxFrames/2; - int numFrames = testParamsPtr->maxFrames/2; - - // Check to see if the signal is clipped. - double amplitudeLeft = PaQa_MeasureSineAmplitudeBySlope( &loopbackContextPtr->recordings[0], - testParamsPtr->baseFrequency, testParamsPtr->sampleRate, - startFrame, numFrames ); - double gainLeft = amplitudeLeft / testParamsPtr->amplitude; - double amplitudeRight = PaQa_MeasureSineAmplitudeBySlope( &loopbackContextPtr->recordings[1], - testParamsPtr->baseFrequency, testParamsPtr->sampleRate, - startFrame, numFrames ); - double gainRight = amplitudeLeft / testParamsPtr->amplitude; - printf(" Loop gain: left = %f, right = %f\n", gainLeft, gainRight ); - - if( (amplitudeLeft > 1.0 ) || (amplitudeRight > 1.0) ) - { - printf("ERROR - loop gain is too high. Should be around than 1.0. Please lower output level and/or input gain.\n" ); - clipped = 1; - } - return clipped; -} - -/*******************************************************************/ -int PaQa_MeasureBackgroundNoise( LoopbackContext *loopbackContextPtr, double *rmsPtr ) -{ - int result = 0; - *rmsPtr = 0.0; - // Rewind so we can record some input. - loopbackContextPtr->recordings[0].numFrames = 0; - loopbackContextPtr->recordings[1].numFrames = 0; - result = PaQa_RunInputOnly( loopbackContextPtr ); - if( result == 0 ) - { - double leftRMS = PaQa_MeasureRootMeanSquare( loopbackContextPtr->recordings[0].buffer, - loopbackContextPtr->recordings[0].numFrames ); - double rightRMS = PaQa_MeasureRootMeanSquare( loopbackContextPtr->recordings[1].buffer, - loopbackContextPtr->recordings[1].numFrames ); - *rmsPtr = (leftRMS + rightRMS) / 2.0; - } - return result; -} - -/*******************************************************************/ -/** - * Output a sine wave then try to detect it on input. - * - * @return 1 if loopback connected, 0 if not, or negative error. 
- */ -int PaQa_CheckForLoopBack( UserOptions *userOptions, PaDeviceIndex inputDevice, PaDeviceIndex outputDevice ) -{ - TestParameters testParams; - LoopbackContext loopbackContext; - const PaDeviceInfo *inputDeviceInfo; - const PaDeviceInfo *outputDeviceInfo; - PaError err = paNoError; - double minAmplitude; - int loopbackIsConnected; - int startFrame, numFrames; - double magLeft, magRight; - - inputDeviceInfo = Pa_GetDeviceInfo( inputDevice ); - if( inputDeviceInfo == NULL ) - { - printf("ERROR - Pa_GetDeviceInfo for input returned NULL.\n"); - return paInvalidDevice; - } - if( inputDeviceInfo->maxInputChannels < 2 ) - { - return 0; - } - - outputDeviceInfo = Pa_GetDeviceInfo( outputDevice ); - if( outputDeviceInfo == NULL ) - { - printf("ERROR - Pa_GetDeviceInfo for output returned NULL.\n"); - return paInvalidDevice; - } - if( outputDeviceInfo->maxOutputChannels < 2 ) - { - return 0; - } - - printf( "Look for loopback cable between \"%s\" => \"%s\"\n", outputDeviceInfo->name, inputDeviceInfo->name); - - printf( " Default suggested input latency (msec): low = %5.2f, high = %5.2f\n", - (inputDeviceInfo->defaultLowInputLatency * 1000.0), - (inputDeviceInfo->defaultHighInputLatency * 1000.0) ); - printf( " Default suggested output latency (msec): low = %5.2f, high = %5.2f\n", - (outputDeviceInfo->defaultLowOutputLatency * 1000.0), - (outputDeviceInfo->defaultHighOutputLatency * 1000.0) ); - - PaQa_SetDefaultTestParameters( &testParams, inputDevice, outputDevice ); - - PaQa_OverrideTestParameters( &testParams, userOptions ); - - testParams.maxFrames = (int) (LOOPBACK_DETECTION_DURATION_SECONDS * testParams.sampleRate); - minAmplitude = testParams.amplitude / 4.0; - - // Check to see if the selected formats are supported. - if( Pa_IsFormatSupported( &testParams.inputParameters, NULL, testParams.sampleRate ) != paFormatIsSupported ) - { - printf( "Input not supported for this format!\n" ); - return 0; - } - if( Pa_IsFormatSupported( NULL, &testParams.outputParameters, testParams.sampleRate ) != paFormatIsSupported ) - { - printf( "Output not supported for this format!\n" ); - return 0; - } - - PaQa_SetupLoopbackContext( &loopbackContext, &testParams ); - - if( inputDevice == outputDevice ) - { - // Use full duplex if checking for loopback on one device. - testParams.flags &= ~PAQA_FLAG_TWO_STREAMS; - } - else - { - // Use half duplex if checking for loopback on two different device. - testParams.flags = PAQA_FLAG_TWO_STREAMS; - } - err = PaQa_RunLoopback( &loopbackContext ); - QA_ASSERT_TRUE("loopback detection callback did not run", (loopbackContext.callbackCount > 1) ); - - // Analyse recording to see if we captured the output. - // Start in the middle assuming past latency. - startFrame = testParams.maxFrames/2; - numFrames = testParams.maxFrames/2; - magLeft = PaQa_CorrelateSine( &loopbackContext.recordings[0], - loopbackContext.generators[0].frequency, - testParams.sampleRate, - startFrame, numFrames, NULL ); - magRight = PaQa_CorrelateSine( &loopbackContext.recordings[1], - loopbackContext.generators[1].frequency, - testParams.sampleRate, - startFrame, numFrames, NULL ); - printf(" Amplitudes: left = %f, right = %f\n", magLeft, magRight ); - - // Check for backwards cable. 
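-    // Each output channel carries its own sine frequency
-    // (PaQa_GetNthFrequency), so if correlating each input channel
-    // against its own generator fails but the cross-correlation against
-    // the other channel's generator succeeds, the left and right wires
-    // of the loopback cable are swapped.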
-/*******************************************************************/
-/**
- * If there is a loopback connection then run the analysis.
- */
-static int CheckLoopbackAndScan( UserOptions *userOptions,
-        PaDeviceIndex iIn, PaDeviceIndex iOut )
-{
-    int loopbackConnected = PaQa_CheckForLoopBack( userOptions, iIn, iOut );
-    if( loopbackConnected > 0 )
-    {
-        PaQa_AnalyzeLoopbackConnection( userOptions, iIn, iOut );
-        return 1;
-    }
-    return 0;
-}
-
-/*******************************************************************/
-/**
- * Scan every combination of output to input device.
- * If a loopback is found then analyse the combination.
- * The scan can be overridden using the -i and -o command line options.
- */
-static int ScanForLoopback(UserOptions *userOptions)
-{
-    PaDeviceIndex iIn,iOut;
-    int numLoopbacks = 0;
-    int numDevices;
-    numDevices = Pa_GetDeviceCount();
-
-    // If both devices are specified then just use that combination.
-    if ((userOptions->inputDevice >= 0) && (userOptions->outputDevice >= 0))
-    {
-        numLoopbacks += CheckLoopbackAndScan( userOptions, userOptions->inputDevice, userOptions->outputDevice );
-    }
-    else if (userOptions->inputDevice >= 0)
-    {
-        // Just scan for output.
-        for( iOut=0; iOut<numDevices; iOut++ )
-        {
-            numLoopbacks += CheckLoopbackAndScan( userOptions, userOptions->inputDevice, iOut );
-        }
-    }
-    else if (userOptions->outputDevice >= 0)
-    {
-        // Just scan for input.
-        for( iIn=0; iIn<numDevices; iIn++ )
-        {
-            numLoopbacks += CheckLoopbackAndScan( userOptions, iIn, userOptions->outputDevice );
-        }
-    }
-    else
-    {
-        // Scan both.
-        for( iOut=0; iOut<numDevices; iOut++ )
-        {
-            for( iIn=0; iIn<numDevices; iIn++ )
-            {
-                numLoopbacks += CheckLoopbackAndScan( userOptions, iIn, iOut );
-            }
-        }
-    }
-
-    QA_ASSERT_TRUE( "no loopback connections found", (numLoopbacks > 0) );
-    return numLoopbacks;
-
-error:
-    return -1;
-}
-
-/*==========================================================================================*/
-int TestSampleFormatConversion( void )
-{
-    int i;
-    const float floatInput[] = { 1.0, 0.5, -0.5, -1.0 };
-
-    const char charInput[] = { 127, 64, -64, -128 };
-    const unsigned char ucharInput[] = { 255, 128+64, 64, 0 };
-    const short shortInput[] = { 32767, 32768/2, -32768/2, -32768 };
-    const int intInput[] = { 2147483647, 2147483647/2, -1073741824 /*-2147483648/2 doesn't work in msvc*/, -2147483648 };
-
-    float floatOutput[4];
-    short shortOutput[4];
-    int intOutput[4];
-    unsigned char ucharOutput[4];
-    char charOutput[4];
-
-    QA_ASSERT_EQUALS("int must be 32-bit", 4, (int) sizeof(int) );
-    QA_ASSERT_EQUALS("short must be 16-bit", 2, (int) sizeof(short) );
-
-    // from Float ======
-    PaQa_ConvertFromFloat( floatInput, 4, paUInt8, ucharOutput );
-    for( i=0; i<4; i++ )
-    {
-        QA_ASSERT_CLOSE_INT( "paFloat32 -> paUInt8 error", ucharInput[i], ucharOutput[i], 1 );
-    }
-
-    PaQa_ConvertFromFloat( floatInput, 4, paInt8, charOutput );
-    for( i=0; i<4; i++ )
-    {
-        QA_ASSERT_CLOSE_INT( "paFloat32 -> paInt8 error", charInput[i], charOutput[i], 1 );
-    }
-
-    PaQa_ConvertFromFloat( floatInput, 4, paInt16, shortOutput );
-    for( i=0; i<4; i++ )
-    {
-        QA_ASSERT_CLOSE_INT( "paFloat32 -> paInt16 error", shortInput[i], shortOutput[i], 1 );
-    }
-
-    PaQa_ConvertFromFloat( floatInput, 4, paInt32, intOutput );
-    for( i=0; i<4; i++ )
-    {
-        QA_ASSERT_CLOSE_INT( "paFloat32 -> paInt32 error", intInput[i], intOutput[i], 0x00010000 );
-    }
-
-
-    // to Float ======
-    memset( floatOutput, 0, sizeof(floatOutput) );
-    PaQa_ConvertToFloat( ucharInput, 4, paUInt8, floatOutput );
-    for( i=0; i<4; i++ )
-    {
-        QA_ASSERT_CLOSE( "paUInt8 -> paFloat32 error", floatInput[i], floatOutput[i], 0.01 );
-    }
-
-    memset( floatOutput, 0, sizeof(floatOutput) );
-    PaQa_ConvertToFloat( charInput, 4, paInt8, floatOutput );
-    for( i=0; i<4; i++ )
-    {
-        QA_ASSERT_CLOSE( "paInt8 -> paFloat32 error", floatInput[i], floatOutput[i], 0.01 );
-    }
-
-    memset( floatOutput, 0, sizeof(floatOutput) );
-    PaQa_ConvertToFloat( shortInput, 4, paInt16, floatOutput );
-    for( i=0; i<4; i++ )
-    {
-        QA_ASSERT_CLOSE( "paInt16 -> paFloat32 error", floatInput[i], floatOutput[i], 0.001 );
-    }
-
-    memset( floatOutput, 0, sizeof(floatOutput) );
-    PaQa_ConvertToFloat( intInput, 4, paInt32, floatOutput );
-    for( i=0; i<4; i++ )
-    {
-        QA_ASSERT_CLOSE( "paInt32 -> paFloat32 error", floatInput[i], floatOutput[i], 0.00001 );
-    }
-
-    return 0;
-
-error:
-    return -1;
-}
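The tolerances above imply the usual scale-and-clip conversions between paFloat32 and the integer formats. A small Python sketch under that assumption (PortAudio's actual converters are C and may round differently):

def float_to_int16(x):
    x = max(-1.0, min(1.0, x))       # clip, as a converter must
    return int(round(x * 32767.0))   # scaling convention assumed here

def int16_to_float(n):
    return n / 32768.0

for f in (1.0, 0.5, -0.5, -1.0):
    # round trip stays within the 0.001 bound used by the paInt16 test above
    assert abs(int16_to_float(float_to_int16(f)) - f) < 0.001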
-/*******************************************************************/
-void usage( const char *name )
-{
-    printf("%s [-i# -o# -l# -r# -s# -m -w -dDir]\n", name);
-    printf("  -i#   - Input device ID. Will scan for loopback cable if not specified.\n");
-    printf("  -o#   - Output device ID. Will scan for loopback if not specified.\n");
-    printf("  -l#   - Latency for both input and output in milliseconds.\n");
-    printf("  --inputLatency #   Input latency in milliseconds.\n");
-    printf("  --outputLatency #  Output latency in milliseconds.\n");
-    printf("  -r#   - Sample Rate in Hz. Will use multiple common rates if not specified.\n");
-    printf("  -s#   - Size of callback buffer in frames, framesPerBuffer. Will use common values if not specified.\n");
-    printf("  -w    - Save bad recordings in a WAV file.\n");
-    printf("  -dDir - Directory path for WAV files. Default is the current directory.\n");
-    printf("  -m    - Just test the DSP Math code and not the audio devices.\n");
-    printf("  -v    - Verbose reports.\n");
-}
-
-/*******************************************************************/
-int main( int argc, char **argv )
-{
-    int i;
-    UserOptions userOptions;
-    int result = 0;
-    int justMath = 0;
-    char *executableName = argv[0];
-
-    printf("PortAudio LoopBack Test built " __DATE__ " at " __TIME__ "\n");
-
-    if( argc > 1 ){
-        printf("running with arguments:");
-        for(i=1; i < argc; ++i )
-            printf(" %s", argv[i] );
-        printf("\n");
-    }else{
-        printf("running with no arguments\n");
-    }
-
-    memset(&userOptions, 0, sizeof(userOptions));
-    userOptions.inputDevice = paNoDevice;
-    userOptions.outputDevice = paNoDevice;
-    userOptions.sampleRate = -1;
-    userOptions.framesPerBuffer = -1;
-    userOptions.inputLatency = -1;
-    userOptions.outputLatency = -1;
-    userOptions.waveFilePath = ".";
-
-    // Process arguments. Skip name of executable.
-    i = 1;
-    while( i < argc )
-def _decryptChar(cipher, R):
-    cipher = byteord(cipher)
-    plain = ((cipher ^ (R >> 8))) & 0xFF
-    R = ((cipher + R) * 52845 + 22719) & 0xFFFF
-    return bytechr(plain), R
-
-
-def _encryptChar(plain, R):
-    plain = byteord(plain)
-    cipher = ((plain ^ (R >> 8))) & 0xFF
-    R = ((cipher + R) * 52845 + 22719) & 0xFFFF
-    return bytechr(cipher), R
-
-
-def decrypt(cipherstring, R):
-    r"""
-    Decrypts a string using the Type 1 encryption algorithm.
-
-    Args:
-        cipherstring: String of ciphertext.
-        R: Initial key.
-
-    Returns:
-        decryptedStr: Plaintext string.
-        R: Output key for subsequent decryptions.
-
-    Examples::
-
-        >>> testStr = b"\0\0asdadads asds\265"
-        >>> decryptedStr, R = decrypt(testStr, 12321)
-        >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
-        True
-        >>> R == 36142
-        True
-    """
-    plainList = []
-    for cipher in cipherstring:
-        plain, R = _decryptChar(cipher, R)
-        plainList.append(plain)
-    plainstring = bytesjoin(plainList)
-    return plainstring, int(R)
-
-
-def encrypt(plainstring, R):
-    r"""
-    Encrypts a string using the Type 1 encryption algorithm.
-
-    Note that the algorithm as described in the Type 1 specification requires the
-    plaintext to be prefixed with a number of random bytes. (For ``eexec`` the
-    number of random bytes is set to 4.) This routine does *not* add the random
-    prefix to its input.
-
-    Args:
-        plainstring: String of plaintext.
-        R: Initial key.
-
-    Returns:
-        cipherstring: Ciphertext string.
-        R: Output key for subsequent encryptions.
-
-    Examples::
-
-        >>> testStr = b"\0\0asdadads asds\265"
-        >>> decryptedStr, R = decrypt(testStr, 12321)
-        >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
-        True
-        >>> R == 36142
-        True
-
-        >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
-        >>> encryptedStr, R = encrypt(testStr, 12321)
-        >>> encryptedStr == b"\0\0asdadads asds\265"
-        True
-        >>> R == 36142
-        True
-    """
-    cipherList = []
-    for plain in plainstring:
-        cipher, R = _encryptChar(plain, R)
-        cipherList.append(cipher)
-    cipherstring = bytesjoin(cipherList)
-    return cipherstring, int(R)
-
-
-def hexString(s):
-    import binascii
-
-    return binascii.hexlify(s)
-
-
-def deHexString(h):
-    import binascii
-
-    h = bytesjoin(h.split())
-    return binascii.unhexlify(h)
-
-
-if __name__ == "__main__":
-    import sys
-    import doctest
-
-    sys.exit(doctest.testmod().failed)
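As a complement to the doctests above, a round trip through encrypt() and decrypt() with the standard eexec initial key of 55665 from the Type 1 spec; the import path assumes this file is fontTools' eexec module, and the payload and its 4-byte prefix are arbitrary:

from fontTools.misc.eexec import encrypt, decrypt

EEXEC_KEY = 55665                        # initial R for eexec data per the Type 1 spec
payload = b"XXXX" + b"/Subrs 5 array"    # caller supplies the 4 "random" prefix bytes
ciphertext, _ = encrypt(payload, EEXEC_KEY)
plaintext, _ = decrypt(ciphertext, EEXEC_KEY)
assert plaintext == payload              # the prefix is simply discarded after decryption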
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/code/shared/extensions.ts b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/code/shared/extensions.ts
deleted file mode 100644
index 9193acbeca876d8788a5f54f578f8eff2c2652f1..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/code/shared/extensions.ts
+++ /dev/null
@@ -1,48 +0,0 @@
-import type { Extension } from "@codemirror/state";
-import {
-	lineNumbers,
-	highlightSpecialChars,
-	drawSelection,
-	rectangularSelection,
-	crosshairCursor,
-	keymap
-} from "@codemirror/view";
-export { EditorView } from "@codemirror/view";
-import { EditorState } from "@codemirror/state";
-import {
-	foldGutter,
-	indentOnInput,
-	syntaxHighlighting,
-	defaultHighlightStyle,
-	foldKeymap
-} from "@codemirror/language";
-import { history, defaultKeymap, historyKeymap } from "@codemirror/commands";
-import {
-	closeBrackets,
-	closeBracketsKeymap,
-	completionKeymap
-} from "@codemirror/autocomplete";
-import { lintKeymap } from "@codemirror/lint";
-
-export const basicSetup: Extension = /*@__PURE__*/ ((): Extension[] => [
-	lineNumbers(),
-	highlightSpecialChars(),
-	history(),
-	foldGutter(),
-	drawSelection(),
-	EditorState.allowMultipleSelections.of(true),
-	indentOnInput(),
-	syntaxHighlighting(defaultHighlightStyle, { fallback: true }),
-	closeBrackets(),
-	rectangularSelection(),
-	crosshairCursor(),
-
-	keymap.of([
-		...closeBracketsKeymap,
-		...defaultKeymap,
-		...historyKeymap,
-		...foldKeymap,
-		...completionKeymap,
-		...lintKeymap
-	])
-])();
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/themes/base.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/themes/base.py
deleted file mode 100644
index deea8394637a06885467ee0645ff92925963848e..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/themes/base.py
+++ /dev/null
@@ -1,1827 +0,0 @@
-from __future__ import annotations
-
-import json
-import re
-import tempfile
-import textwrap
-from pathlib import Path
-from typing import Iterable
-
-import huggingface_hub
-import requests
-import semantic_version as semver
-from gradio_client.documentation import document, set_documentation_group
-from huggingface_hub import CommitOperationAdd
-
-from gradio.themes.utils import (
-    colors,
-    fonts,
-    get_matching_version,
-    get_theme_assets,
-    sizes,
-)
-from gradio.themes.utils.readme_content import README_CONTENT
-
-set_documentation_group("themes")
-
-
-class ThemeClass:
-    def __init__(self):
-        self._stylesheets = []
-        self.name = None
-
-    def _get_theme_css(self):
-        css = {}
-        dark_css = {}
-
-        for attr, val in self.__dict__.items():
-            if attr.startswith("_"):
-                continue
-            if val is None:
-                if attr.endswith("_dark"):
-                    dark_css[attr[:-5]] = None
-                    continue
-                else:
-                    raise ValueError(
-                        f"Cannot set '{attr}' to None - only dark mode variables can be None."
-                    )
-            val = str(val)
-            pattern = r"(\*)([\w_]+)(\b)"
-
-            def repl_func(match):
-                full_match = match.group(0)
-                if full_match.startswith("*") and full_match.endswith("_dark"):
-                    raise ValueError(
-                        f"Cannot refer '{attr}' to '{val}' - dark variable references are automatically used for dark mode attributes, so do not use the _dark suffix in the value."
-                    )
-                if (
-                    attr.endswith("_dark")
-                    and full_match.startswith("*")
-                    and attr[:-5] == full_match[1:]
-                ):
-                    raise ValueError(
-                        f"Cannot refer '{attr}' to '{val}' - if dark and light mode values are the same, set dark mode version to None."
-                    )
-
-                word = match.group(2)
-                word = word.replace("_", "-")
-                return f"var(--{word})"
-
-            val = re.sub(pattern, repl_func, val)
-
-            attr = attr.replace("_", "-")
-
-            if attr.endswith("-dark"):
-                attr = attr[:-5]
-                dark_css[attr] = val
-            else:
-                css[attr] = val
-
-        for attr, val in css.items():
-            if attr not in dark_css:
-                dark_css[attr] = val
-
-        css_code = (
-            ":root {\n"
-            + "\n".join([f"  --{attr}: {val};" for attr, val in css.items()])
-            + "\n}"
-        )
-        dark_css_code = (
-            ".dark {\n"
-            + "\n".join([f"  --{attr}: {val};" for attr, val in dark_css.items()])
-            + "\n}"
-        )
-
-        return f"{css_code}\n{dark_css_code}"
-
-    def to_dict(self):
-        """Convert the theme into a python dictionary."""
-        schema = {"theme": {}}
-        for prop in dir(self):
-            if (
-                not prop.startswith("_")
-                or prop.startswith("_font")
-                or prop == "_stylesheets"
-                or prop == "name"
-            ) and isinstance(getattr(self, prop), (list, str)):
-                schema["theme"][prop] = getattr(self, prop)
-        return schema
-
-    @classmethod
-    def load(cls, path: str) -> ThemeClass:
-        """Load a theme from a json file.
-
-        Parameters:
-            path: The filepath to read.
-        """
-        with open(path) as fp:
-            return cls.from_dict(json.load(fp, object_hook=fonts.as_font))
-
-    @classmethod
-    def from_dict(cls, theme: dict[str, dict[str, str]]) -> ThemeClass:
-        """Create a theme instance from a dictionary representation.
-
-        Parameters:
-            theme: The dictionary representation of the theme.
-        """
-        new_theme = cls()
-        for prop, value in theme["theme"].items():
-            setattr(new_theme, prop, value)
-
-        # For backwards compatibility, load attributes in base theme not in the loaded theme from the base theme.
-        base = Base()
-        for attr in base.__dict__:
-            if not attr.startswith("_") and not hasattr(new_theme, attr):
-                setattr(new_theme, attr, getattr(base, attr))
-
-        return new_theme
-
-    def dump(self, filename: str):
-        """Write the theme to a json file.
-
-        Parameters:
-            filename: The path to write the theme to.
-        """
-        Path(filename).write_text(json.dumps(self.to_dict(), cls=fonts.FontEncoder))
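The *variable references resolved by _get_theme_css above map directly onto CSS custom properties. A minimal sketch of that substitution, using the same pattern as repl_func (the sample value here is made up):

import re

value = "1px solid *primary_500"   # a theme attribute referencing another variable
css_value = re.sub(
    r"(\*)([\w_]+)(\b)",
    lambda m: f"var(--{m.group(2).replace('_', '-')})",
    value,
)
assert css_value == "1px solid var(--primary-500)"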
- """ - if "@" not in repo_name: - name, version = repo_name, None - else: - name, version = repo_name.split("@") - - api = huggingface_hub.HfApi(token=hf_token) - - try: - space_info = api.space_info(name) - except requests.HTTPError as e: - raise ValueError(f"The space {name} does not exist") from e - - assets = get_theme_assets(space_info) - matching_version = get_matching_version(assets, version) - - if not matching_version: - raise ValueError( - f"Cannot find a matching version for expression {version} " - f"from files {[f.filename for f in assets]}" - ) - - theme_file = huggingface_hub.hf_hub_download( - repo_id=name, - repo_type="space", - filename=f"themes/theme_schema@{matching_version.version}.json", - ) - theme = cls.load(theme_file) - theme.name = name - return theme - - @staticmethod - def _get_next_version(space_info: huggingface_hub.hf_api.SpaceInfo) -> str: - assets = get_theme_assets(space_info) - latest_version = max(assets, key=lambda asset: asset.version).version - return str(latest_version.next_patch()) - - @staticmethod - def _theme_version_exists( - space_info: huggingface_hub.hf_api.SpaceInfo, version: str - ) -> bool: - assets = get_theme_assets(space_info) - return any(a.version == semver.Version(version) for a in assets) - - def push_to_hub( - self, - repo_name: str, - org_name: str | None = None, - version: str | None = None, - hf_token: str | None = None, - theme_name: str | None = None, - description: str | None = None, - private: bool = False, - ): - """Upload a theme to the HuggingFace hub. - - This requires a HuggingFace account. - - Parameters: - repo_name: The name of the repository to store the theme assets, e.g. 'my_theme' or 'sunset'. - org_name: The name of the org to save the space in. If None (the default), the username corresponding to the logged in user, or hƒ_token is used. - version: A semantic version tag for theme. Bumping the version tag lets you publish updates to a theme without changing the look of applications that already loaded your theme. - hf_token: API token for your HuggingFace account - theme_name: Name for the name. If None, defaults to repo_name - description: A long form description to your theme. - """ - - from gradio import __version__ - - api = huggingface_hub.HfApi() - - if not hf_token: - try: - author = huggingface_hub.whoami()["name"] - except OSError as e: - raise ValueError( - "In order to push to hub, log in via `huggingface-cli login` " - "or provide a theme_token to push_to_hub. For more information " - "see https://huggingface.co/docs/huggingface_hub/quick-start#login" - ) from e - else: - author = huggingface_hub.whoami(token=hf_token)["name"] - - space_id = f"{org_name or author}/{repo_name}" - - try: - space_info = api.space_info(space_id) - except requests.HTTPError: - space_info = None - - space_exists = space_info is not None - - # If no version, set the version to next patch release - if not version: - version = self._get_next_version(space_info) if space_exists else "0.0.1" - else: - _ = semver.Version(version) - - if space_exists and self._theme_version_exists(space_info, version): - raise ValueError( - f"The space {space_id} already has a " - f"theme with version {version}. See: themes/theme_schema@{version}.json. " - "To manually override this version, use the HuggingFace hub UI." 
-
-
-@document("push_to_hub", "from_hub", "load", "dump", "from_dict", "to_dict")
-class Base(ThemeClass):
-    def __init__(
-        self,
-        *,
-        primary_hue: colors.Color | str = colors.blue,
-        secondary_hue: colors.Color | str = colors.blue,
-        neutral_hue: colors.Color | str = colors.gray,
-        text_size: sizes.Size | str = sizes.text_md,
-        spacing_size: sizes.Size | str = sizes.spacing_md,
-        radius_size: sizes.Size | str = sizes.radius_md,
-        font: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
-            fonts.GoogleFont("Source Sans Pro"),
-            "ui-sans-serif",
-            "system-ui",
-            "sans-serif",
-        ),
-        font_mono: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
-            fonts.GoogleFont("IBM Plex Mono"),
-            "ui-monospace",
-            "Consolas",
-            "monospace",
-        ),
-    ):
-        """
-        Parameters:
-            primary_hue: The primary hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string "green"), or pass your own gradio.themes.utils.Color object.
-            secondary_hue: The secondary hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string "green"), or pass your own gradio.themes.utils.Color object.
-            neutral_hue: The neutral hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string "green"), or pass your own gradio.themes.utils.Color object.
-            text_size: The size of the text. Load a preset, like gradio.themes.sizes.text_sm (or just the string "sm"), or pass your own gradio.themes.utils.Size object.
-            spacing_size: The size of the spacing. Load a preset, like gradio.themes.sizes.spacing_sm (or just the string "sm"), or pass your own gradio.themes.utils.Size object.
-            radius_size: The radius size of corners. Load a preset, like gradio.themes.sizes.radius_sm (or just the string "sm"), or pass your own gradio.themes.utils.Size object.
- font: The primary font to use for the theme. Pass a string for a system font, or a gradio.themes.font.GoogleFont object to load a font from Google Fonts. Pass a list of fonts for fallbacks. - font_mono: The monospace font to use for the theme, applies to code. Pass a string for a system font, or a gradio.themes.font.GoogleFont object to load a font from Google Fonts. Pass a list of fonts for fallbacks. - """ - - self.name = "base" - - def expand_shortcut(shortcut, mode="color", prefix=None): - if not isinstance(shortcut, str): - return shortcut - if mode == "color": - for color in colors.Color.all: - if color.name == shortcut: - return color - raise ValueError(f"Color shortcut {shortcut} not found.") - elif mode == "size": - for size in sizes.Size.all: - if size.name == f"{prefix}_{shortcut}": - return size - raise ValueError(f"Size shortcut {shortcut} not found.") - - primary_hue = expand_shortcut(primary_hue, mode="color") - secondary_hue = expand_shortcut(secondary_hue, mode="color") - neutral_hue = expand_shortcut(neutral_hue, mode="color") - text_size = expand_shortcut(text_size, mode="size", prefix="text") - spacing_size = expand_shortcut(spacing_size, mode="size", prefix="spacing") - radius_size = expand_shortcut(radius_size, mode="size", prefix="radius") - - # Hue ranges - self.primary_50 = primary_hue.c50 - self.primary_100 = primary_hue.c100 - self.primary_200 = primary_hue.c200 - self.primary_300 = primary_hue.c300 - self.primary_400 = primary_hue.c400 - self.primary_500 = primary_hue.c500 - self.primary_600 = primary_hue.c600 - self.primary_700 = primary_hue.c700 - self.primary_800 = primary_hue.c800 - self.primary_900 = primary_hue.c900 - self.primary_950 = primary_hue.c950 - - self.secondary_50 = secondary_hue.c50 - self.secondary_100 = secondary_hue.c100 - self.secondary_200 = secondary_hue.c200 - self.secondary_300 = secondary_hue.c300 - self.secondary_400 = secondary_hue.c400 - self.secondary_500 = secondary_hue.c500 - self.secondary_600 = secondary_hue.c600 - self.secondary_700 = secondary_hue.c700 - self.secondary_800 = secondary_hue.c800 - self.secondary_900 = secondary_hue.c900 - self.secondary_950 = secondary_hue.c950 - - self.neutral_50 = neutral_hue.c50 - self.neutral_100 = neutral_hue.c100 - self.neutral_200 = neutral_hue.c200 - self.neutral_300 = neutral_hue.c300 - self.neutral_400 = neutral_hue.c400 - self.neutral_500 = neutral_hue.c500 - self.neutral_600 = neutral_hue.c600 - self.neutral_700 = neutral_hue.c700 - self.neutral_800 = neutral_hue.c800 - self.neutral_900 = neutral_hue.c900 - self.neutral_950 = neutral_hue.c950 - - # Spacing - self.spacing_xxs = spacing_size.xxs - self.spacing_xs = spacing_size.xs - self.spacing_sm = spacing_size.sm - self.spacing_md = spacing_size.md - self.spacing_lg = spacing_size.lg - self.spacing_xl = spacing_size.xl - self.spacing_xxl = spacing_size.xxl - - self.radius_xxs = radius_size.xxs - self.radius_xs = radius_size.xs - self.radius_sm = radius_size.sm - self.radius_md = radius_size.md - self.radius_lg = radius_size.lg - self.radius_xl = radius_size.xl - self.radius_xxl = radius_size.xxl - - self.text_xxs = text_size.xxs - self.text_xs = text_size.xs - self.text_sm = text_size.sm - self.text_md = text_size.md - self.text_lg = text_size.lg - self.text_xl = text_size.xl - self.text_xxl = text_size.xxl - - # Font - if not isinstance(font, Iterable): - font = [font] - self._font = [ - fontfam if isinstance(fontfam, fonts.Font) else fonts.Font(fontfam) - for fontfam in font - ] - if not isinstance(font_mono, Iterable): - 
font_mono = [font_mono]
-        self._font_mono = [
-            fontfam if isinstance(fontfam, fonts.Font) else fonts.Font(fontfam)
-            for fontfam in font_mono
-        ]
-        self.font = ", ".join(str(font) for font in self._font)
-        self.font_mono = ", ".join(str(font) for font in self._font_mono)
-
-        self._stylesheets = []
-        for font in self._font + self._font_mono:
-            font_stylesheet = font.stylesheet()
-            if font_stylesheet:
-                self._stylesheets.append(font_stylesheet)
-
-        self.set()
-
-    def set(
-        self,
-        *,
-        # Body Attributes: These set the values for the entire body of the app.
-        body_background_fill=None,
-        body_background_fill_dark=None,
-        body_text_color=None,
-        body_text_color_dark=None,
-        body_text_size=None,
-        body_text_color_subdued=None,
-        body_text_color_subdued_dark=None,
-        body_text_weight=None,
-        embed_radius=None,
-        # Element Colors: These set the colors for common elements.
-        background_fill_primary=None,
-        background_fill_primary_dark=None,
-        background_fill_secondary=None,
-        background_fill_secondary_dark=None,
-        border_color_accent=None,
-        border_color_accent_dark=None,
-        border_color_accent_subdued=None,
-        border_color_accent_subdued_dark=None,
-        border_color_primary=None,
-        border_color_primary_dark=None,
-        color_accent=None,
-        color_accent_soft=None,
-        color_accent_soft_dark=None,
-        # Text: This sets the text styling for text elements.
-        link_text_color=None,
-        link_text_color_dark=None,
-        link_text_color_active=None,
-        link_text_color_active_dark=None,
-        link_text_color_hover=None,
-        link_text_color_hover_dark=None,
-        link_text_color_visited=None,
-        link_text_color_visited_dark=None,
-        prose_text_size=None,
-        prose_text_weight=None,
-        prose_header_text_weight=None,
-        code_background_fill=None,
-        code_background_fill_dark=None,
-        # Shadows: These set the high-level shadow rendering styles. These variables are often referenced by other component-specific shadow variables.
-        shadow_drop=None,
-        shadow_drop_lg=None,
-        shadow_inset=None,
-        shadow_spread=None,
-        shadow_spread_dark=None,
-        # Layout Atoms: These set the style for common layout elements, such as the blocks that wrap components.
- block_background_fill=None, - block_background_fill_dark=None, - block_border_color=None, - block_border_color_dark=None, - block_border_width=None, - block_border_width_dark=None, - block_info_text_color=None, - block_info_text_color_dark=None, - block_info_text_size=None, - block_info_text_weight=None, - block_label_background_fill=None, - block_label_background_fill_dark=None, - block_label_border_color=None, - block_label_border_color_dark=None, - block_label_border_width=None, - block_label_border_width_dark=None, - block_label_shadow=None, - block_label_text_color=None, - block_label_text_color_dark=None, - block_label_margin=None, - block_label_padding=None, - block_label_radius=None, - block_label_right_radius=None, - block_label_text_size=None, - block_label_text_weight=None, - block_padding=None, - block_radius=None, - block_shadow=None, - block_shadow_dark=None, - block_title_background_fill=None, - block_title_background_fill_dark=None, - block_title_border_color=None, - block_title_border_color_dark=None, - block_title_border_width=None, - block_title_border_width_dark=None, - block_title_text_color=None, - block_title_text_color_dark=None, - block_title_padding=None, - block_title_radius=None, - block_title_text_size=None, - block_title_text_weight=None, - container_radius=None, - form_gap_width=None, - layout_gap=None, - panel_background_fill=None, - panel_background_fill_dark=None, - panel_border_color=None, - panel_border_color_dark=None, - panel_border_width=None, - panel_border_width_dark=None, - section_header_text_size=None, - section_header_text_weight=None, - # Component Atoms: These set the style for elements within components. - checkbox_background_color=None, - checkbox_background_color_dark=None, - checkbox_background_color_focus=None, - checkbox_background_color_focus_dark=None, - checkbox_background_color_hover=None, - checkbox_background_color_hover_dark=None, - checkbox_background_color_selected=None, - checkbox_background_color_selected_dark=None, - checkbox_border_color=None, - checkbox_border_color_dark=None, - checkbox_border_color_focus=None, - checkbox_border_color_focus_dark=None, - checkbox_border_color_hover=None, - checkbox_border_color_hover_dark=None, - checkbox_border_color_selected=None, - checkbox_border_color_selected_dark=None, - checkbox_border_radius=None, - checkbox_border_width=None, - checkbox_border_width_dark=None, - checkbox_check=None, - radio_circle=None, - checkbox_shadow=None, - checkbox_label_background_fill=None, - checkbox_label_background_fill_dark=None, - checkbox_label_background_fill_hover=None, - checkbox_label_background_fill_hover_dark=None, - checkbox_label_background_fill_selected=None, - checkbox_label_background_fill_selected_dark=None, - checkbox_label_border_color=None, - checkbox_label_border_color_dark=None, - checkbox_label_border_color_hover=None, - checkbox_label_border_color_hover_dark=None, - checkbox_label_border_width=None, - checkbox_label_border_width_dark=None, - checkbox_label_gap=None, - checkbox_label_padding=None, - checkbox_label_shadow=None, - checkbox_label_text_size=None, - checkbox_label_text_weight=None, - checkbox_label_text_color=None, - checkbox_label_text_color_dark=None, - checkbox_label_text_color_selected=None, - checkbox_label_text_color_selected_dark=None, - error_background_fill=None, - error_background_fill_dark=None, - error_border_color=None, - error_border_color_dark=None, - error_border_width=None, - error_border_width_dark=None, - error_text_color=None, - 
error_text_color_dark=None, - error_icon_color=None, - error_icon_color_dark=None, - input_background_fill=None, - input_background_fill_dark=None, - input_background_fill_focus=None, - input_background_fill_focus_dark=None, - input_background_fill_hover=None, - input_background_fill_hover_dark=None, - input_border_color=None, - input_border_color_dark=None, - input_border_color_focus=None, - input_border_color_focus_dark=None, - input_border_color_hover=None, - input_border_color_hover_dark=None, - input_border_width=None, - input_border_width_dark=None, - input_padding=None, - input_placeholder_color=None, - input_placeholder_color_dark=None, - input_radius=None, - input_shadow=None, - input_shadow_dark=None, - input_shadow_focus=None, - input_shadow_focus_dark=None, - input_text_size=None, - input_text_weight=None, - loader_color=None, - loader_color_dark=None, - slider_color=None, - slider_color_dark=None, - stat_background_fill=None, - stat_background_fill_dark=None, - table_border_color=None, - table_border_color_dark=None, - table_even_background_fill=None, - table_even_background_fill_dark=None, - table_odd_background_fill=None, - table_odd_background_fill_dark=None, - table_radius=None, - table_row_focus=None, - table_row_focus_dark=None, - # Buttons: These set the style for buttons. - button_border_width=None, - button_border_width_dark=None, - button_shadow=None, - button_shadow_active=None, - button_shadow_hover=None, - button_transition=None, - button_large_padding=None, - button_large_radius=None, - button_large_text_size=None, - button_large_text_weight=None, - button_small_padding=None, - button_small_radius=None, - button_small_text_size=None, - button_small_text_weight=None, - button_primary_background_fill=None, - button_primary_background_fill_dark=None, - button_primary_background_fill_hover=None, - button_primary_background_fill_hover_dark=None, - button_primary_border_color=None, - button_primary_border_color_dark=None, - button_primary_border_color_hover=None, - button_primary_border_color_hover_dark=None, - button_primary_text_color=None, - button_primary_text_color_dark=None, - button_primary_text_color_hover=None, - button_primary_text_color_hover_dark=None, - button_secondary_background_fill=None, - button_secondary_background_fill_dark=None, - button_secondary_background_fill_hover=None, - button_secondary_background_fill_hover_dark=None, - button_secondary_border_color=None, - button_secondary_border_color_dark=None, - button_secondary_border_color_hover=None, - button_secondary_border_color_hover_dark=None, - button_secondary_text_color=None, - button_secondary_text_color_dark=None, - button_secondary_text_color_hover=None, - button_secondary_text_color_hover_dark=None, - button_cancel_background_fill=None, - button_cancel_background_fill_dark=None, - button_cancel_background_fill_hover=None, - button_cancel_background_fill_hover_dark=None, - button_cancel_border_color=None, - button_cancel_border_color_dark=None, - button_cancel_border_color_hover=None, - button_cancel_border_color_hover_dark=None, - button_cancel_text_color=None, - button_cancel_text_color_dark=None, - button_cancel_text_color_hover=None, - button_cancel_text_color_hover_dark=None, - ) -> Base: - """ - Parameters: - body_background_fill: The background of the entire app. - body_background_fill_dark: The background of the entire app in dark mode. - body_text_color: The default text color. - body_text_color_dark: The default text color in dark mode. - body_text_size: The default text size. 
- body_text_color_subdued: The text color used for softer, less important text. - body_text_color_subdued_dark: The text color used for softer, less important text in dark mode. - body_text_weight: The default text weight. - embed_radius: The corner radius used for embedding when the app is embedded within a page. - background_fill_primary: The background primarily used for items placed directly on the page. - background_fill_primary_dark: The background primarily used for items placed directly on the page in dark mode. - background_fill_secondary: The background primarily used for items placed on top of another item. - background_fill_secondary_dark: The background primarily used for items placed on top of another item in dark mode. - border_color_accent: The border color used for accented items. - border_color_accent_dark: The border color used for accented items in dark mode. - border_color_accent_subdued: The subdued border color for accented items. - border_color_accent_subdued_dark: The subdued border color for accented items in dark mode. - border_color_primary: The border color primarily used for items placed directly on the page. - border_color_primary_dark: The border color primarily used for items placed directly on the page in dark mode. - color_accent: The color used for accented items. - color_accent_soft: The softer color used for accented items. - color_accent_soft_dark: The softer color used for accented items in dark mode. - link_text_color: The text color used for links. - link_text_color_dark: The text color used for links in dark mode. - link_text_color_active: The text color used for links when they are active. - link_text_color_active_dark: The text color used for links when they are active in dark mode. - link_text_color_hover: The text color used for links when they are hovered over. - link_text_color_hover_dark: The text color used for links when they are hovered over in dark mode. - link_text_color_visited: The text color used for links when they have been visited. - link_text_color_visited_dark: The text color used for links when they have been visited in dark mode. - prose_text_size: The text size used for markdown and other prose. - prose_text_weight: The text weight used for markdown and other prose. - prose_header_text_weight: The text weight of a header used for markdown and other prose. - code_background_fill: The background color of code blocks. - code_background_fill_dark: The background color of code blocks in dark mode. - shadow_drop: Drop shadow used by other shadowed items. - shadow_drop_lg: Larger drop shadow used by other shadowed items. - shadow_inset: Inset shadow used by other shadowed items. - shadow_spread: Size of shadow spread used by shadowed items. - shadow_spread_dark: Size of shadow spread used by shadowed items in dark mode. - block_background_fill: The background around an item. - block_background_fill_dark: The background around an item in dark mode. - block_border_color: The border color around an item. - block_border_color_dark: The border color around an item in dark mode. - block_border_width: The border width around an item. - block_border_width_dark: The border width around an item in dark mode. - block_info_text_color: The color of the info text. - block_info_text_color_dark: The color of the info text in dark mode. - block_info_text_size: The size of the info text. - block_info_text_weight: The weight of the info text. - block_label_background_fill: The background of the title label of a media element (e.g. image). 
-            block_label_background_fill_dark: The background of the title label of a media element (e.g. image) in dark mode.
-            block_label_border_color: The border color of the title label of a media element (e.g. image).
-            block_label_border_color_dark: The border color of the title label of a media element (e.g. image) in dark mode.
-            block_label_border_width: The border width of the title label of a media element (e.g. image).
-            block_label_border_width_dark: The border width of the title label of a media element (e.g. image) in dark mode.
-            block_label_shadow: The shadow of the title label of a media element (e.g. image).
-            block_label_text_color: The text color of the title label of a media element (e.g. image).
-            block_label_text_color_dark: The text color of the title label of a media element (e.g. image) in dark mode.
-            block_label_margin: The margin of the title label of a media element (e.g. image) from its surrounding container.
-            block_label_padding: The padding of the title label of a media element (e.g. image).
-            block_label_radius: The corner radius of the title label of a media element (e.g. image).
-            block_label_right_radius: The corner radius of a right-aligned helper label.
-            block_label_text_size: The text size of the title label of a media element (e.g. image).
-            block_label_text_weight: The text weight of the title label of a media element (e.g. image).
-            block_padding: The padding around an item.
-            block_radius: The corner radius around an item.
-            block_shadow: The shadow under an item.
-            block_shadow_dark: The shadow under an item in dark mode.
-            block_title_background_fill: The background of the title of a form element (e.g. textbox).
-            block_title_background_fill_dark: The background of the title of a form element (e.g. textbox) in dark mode.
-            block_title_border_color: The border color of the title of a form element (e.g. textbox).
-            block_title_border_color_dark: The border color of the title of a form element (e.g. textbox) in dark mode.
-            block_title_border_width: The border width of the title of a form element (e.g. textbox).
-            block_title_border_width_dark: The border width of the title of a form element (e.g. textbox) in dark mode.
-            block_title_text_color: The text color of the title of a form element (e.g. textbox).
-            block_title_text_color_dark: The text color of the title of a form element (e.g. textbox) in dark mode.
-            block_title_padding: The padding of the title of a form element (e.g. textbox).
-            block_title_radius: The corner radius of the title of a form element (e.g. textbox).
-            block_title_text_size: The text size of the title of a form element (e.g. textbox).
-            block_title_text_weight: The text weight of the title of a form element (e.g. textbox).
-            container_radius: The corner radius of a layout component that holds other content.
-            form_gap_width: The border gap between form elements (e.g. consecutive textboxes).
-            layout_gap: The gap between items within a row or column.
-            panel_background_fill: The background of a panel.
-            panel_background_fill_dark: The background of a panel in dark mode.
-            panel_border_color: The border color of a panel.
-            panel_border_color_dark: The border color of a panel in dark mode.
-            panel_border_width: The border width of a panel.
-            panel_border_width_dark: The border width of a panel in dark mode.
-            section_header_text_size: The text size of a section header (e.g. tab name).
-            section_header_text_weight: The text weight of a section header (e.g. tab name).
- checkbox_background_color: The background of a checkbox square or radio circle. - checkbox_background_color_dark: The background of a checkbox square or radio circle in dark mode. - checkbox_background_color_focus: The background of a checkbox square or radio circle when focused. - checkbox_background_color_focus_dark: The background of a checkbox square or radio circle when focused in dark mode. - checkbox_background_color_hover: The background of a checkbox square or radio circle when hovered over. - checkbox_background_color_hover_dark: The background of a checkbox square or radio circle when hovered over in dark mode. - checkbox_background_color_selected: The background of a checkbox square or radio circle when selected. - checkbox_background_color_selected_dark: The background of a checkbox square or radio circle when selected in dark mode. - checkbox_border_color: The border color of a checkbox square or radio circle. - checkbox_border_color_dark: The border color of a checkbox square or radio circle in dark mode. - checkbox_border_color_focus: The border color of a checkbox square or radio circle when focused. - checkbox_border_color_focus_dark: The border color of a checkbox square or radio circle when focused in dark mode. - checkbox_border_color_hover: The border color of a checkbox square or radio circle when hovered over. - checkbox_border_color_hover_dark: The border color of a checkbox square or radio circle when hovered over in dark mode. - checkbox_border_color_selected: The border color of a checkbox square or radio circle when selected. - checkbox_border_color_selected_dark: The border color of a checkbox square or radio circle when selected in dark mode. - checkbox_border_radius: The corner radius of a checkbox square. - checkbox_border_width: The border width of a checkbox square or radio circle. - checkbox_border_width_dark: The border width of a checkbox square or radio circle in dark mode. - checkbox_check: The checkmark visual of a checkbox square. - radio_circle: The circle visual of a radio circle. - checkbox_shadow: The shadow of a checkbox square or radio circle. - checkbox_label_background_fill: The background of the surrounding button of a checkbox or radio element. - checkbox_label_background_fill_dark: The background of the surrounding button of a checkbox or radio element in dark mode. - checkbox_label_background_fill_hover: The background of the surrounding button of a checkbox or radio element when hovered over. - checkbox_label_background_fill_hover_dark: The background of the surrounding button of a checkbox or radio element when hovered over in dark mode. - checkbox_label_background_fill_selected: The background of the surrounding button of a checkbox or radio element when selected. - checkbox_label_background_fill_selected_dark: The background of the surrounding button of a checkbox or radio element when selected in dark mode. - checkbox_label_border_color: The border color of the surrounding button of a checkbox or radio element. - checkbox_label_border_color_dark: The border color of the surrounding button of a checkbox or radio element in dark mode. - checkbox_label_border_color_hover: The border color of the surrounding button of a checkbox or radio element when hovered over. - checkbox_label_border_color_hover_dark: The border color of the surrounding button of a checkbox or radio element when hovered over in dark mode. - checkbox_label_border_width: The border width of the surrounding button of a checkbox or radio element. 
-            checkbox_label_border_width_dark: The border width of the surrounding button of a checkbox or radio element in dark mode.
-            checkbox_label_gap: The gap between consecutive checkbox or radio elements.
-            checkbox_label_padding: The padding of the surrounding button of a checkbox or radio element.
-            checkbox_label_shadow: The shadow of the surrounding button of a checkbox or radio element.
-            checkbox_label_text_size: The text size of the label accompanying a checkbox or radio element.
-            checkbox_label_text_weight: The text weight of the label accompanying a checkbox or radio element.
-            checkbox_label_text_color: The text color of the label accompanying a checkbox or radio element.
-            checkbox_label_text_color_dark: The text color of the label accompanying a checkbox or radio element in dark mode.
-            checkbox_label_text_color_selected: The text color of the label accompanying a checkbox or radio element when selected.
-            checkbox_label_text_color_selected_dark: The text color of the label accompanying a checkbox or radio element when selected in dark mode.
-            error_background_fill: The background of an error message.
-            error_background_fill_dark: The background of an error message in dark mode.
-            error_border_color: The border color of an error message.
-            error_border_color_dark: The border color of an error message in dark mode.
-            error_border_width: The border width of an error message.
-            error_border_width_dark: The border width of an error message in dark mode.
-            error_text_color: The text color of an error message.
-            error_text_color_dark: The text color of an error message in dark mode.
-            input_background_fill: The background of an input field.
-            input_background_fill_dark: The background of an input field in dark mode.
-            input_background_fill_focus: The background of an input field when focused.
-            input_background_fill_focus_dark: The background of an input field when focused in dark mode.
-            input_background_fill_hover: The background of an input field when hovered over.
-            input_background_fill_hover_dark: The background of an input field when hovered over in dark mode.
-            input_border_color: The border color of an input field.
-            input_border_color_dark: The border color of an input field in dark mode.
-            input_border_color_focus: The border color of an input field when focused.
-            input_border_color_focus_dark: The border color of an input field when focused in dark mode.
-            input_border_color_hover: The border color of an input field when hovered over.
-            input_border_color_hover_dark: The border color of an input field when hovered over in dark mode.
-            input_border_width: The border width of an input field.
-            input_border_width_dark: The border width of an input field in dark mode.
-            input_padding: The padding of an input field.
-            input_placeholder_color: The placeholder text color of an input field.
-            input_placeholder_color_dark: The placeholder text color of an input field in dark mode.
-            input_radius: The corner radius of an input field.
-            input_shadow: The shadow of an input field.
-            input_shadow_dark: The shadow of an input field in dark mode.
-            input_shadow_focus: The shadow of an input field when focused.
-            input_shadow_focus_dark: The shadow of an input field when focused in dark mode.
-            input_text_size: The text size of an input field.
-            input_text_weight: The text weight of an input field.
-            loader_color: The color of the loading animation while a request is pending.
-            loader_color_dark: The color of the loading animation while a request is pending in dark mode.
- slider_color: The color of the slider in a range element. - slider_color_dark: The color of the slider in a range element in dark mode. - stat_background_fill: The background used for stats visuals (e.g. confidence bars in label). - stat_background_fill_dark: The background used for stats visuals (e.g. confidence bars in label) in dark mode. - table_border_color: The border color of a table. - table_border_color_dark: The border color of a table in dark mode. - table_even_background_fill: The background of even rows in a table. - table_even_background_fill_dark: The background of even rows in a table in dark mode. - table_odd_background_fill: The background of odd rows in a table. - table_odd_background_fill_dark: The background of odd rows in a table in dark mode. - table_radius: The corner radius of a table. - table_row_focus: The background of a focused row in a table. - table_row_focus_dark: The background of a focused row in a table in dark mode. - button_border_width: The border width of a button. - button_border_width_dark: The border width of a button in dark mode. - button_cancel_background_fill: The background of a button of "cancel" variant. - button_cancel_background_fill_dark: The background of a button of "cancel" variant in dark mode. - button_cancel_background_fill_hover: The background of a button of "cancel" variant when hovered over. - button_cancel_background_fill_hover_dark: The background of a button of "cancel" variant when hovered over in dark mode. - button_cancel_border_color: The border color of a button of "cancel" variant. - button_cancel_border_color_dark: The border color of a button of "cancel" variant in dark mode. - button_cancel_border_color_hover: The border color of a button of "cancel" variant when hovered over. - button_cancel_border_color_hover_dark: The border color of a button of "cancel" variant when hovered over in dark mode. - button_cancel_text_color: The text color of a button of "cancel" variant. - button_cancel_text_color_dark: The text color of a button of "cancel" variant in dark mode. - button_cancel_text_color_hover: The text color of a button of "cancel" variant when hovered over. - button_cancel_text_color_hover_dark: The text color of a button of "cancel" variant when hovered over in dark mode. - button_large_padding: The padding of a button with the default "large" size. - button_large_radius: The corner radius of a button with the default "large" size. - button_large_text_size: The text size of a button with the default "large" size. - button_large_text_weight: The text weight of a button with the default "large" size. - button_primary_background_fill: The background of a button of "primary" variant. - button_primary_background_fill_dark: The background of a button of "primary" variant in dark mode. - button_primary_background_fill_hover: The background of a button of "primary" variant when hovered over. - button_primary_background_fill_hover_dark: The background of a button of "primary" variant when hovered over in dark mode. - button_primary_border_color: The border color of a button of "primary" variant. - button_primary_border_color_dark: The border color of a button of "primary" variant in dark mode. - button_primary_border_color_hover: The border color of a button of "primary" variant when hovered over. - button_primary_border_color_hover_dark: The border color of a button of "primary" variant when hovered over in dark mode. - button_primary_text_color: The text color of a button of "primary" variant. 
- button_primary_text_color_dark: The text color of a button of "primary" variant in dark mode. - button_primary_text_color_hover: The text color of a button of "primary" variant when hovered over. - button_primary_text_color_hover_dark: The text color of a button of "primary" variant when hovered over in dark mode. - button_secondary_background_fill: The background of a button of default "secondary" variant. - button_secondary_background_fill_dark: The background of a button of default "secondary" variant in dark mode. - button_secondary_background_fill_hover: The background of a button of default "secondary" variant when hovered over. - button_secondary_background_fill_hover_dark: The background of a button of default "secondary" variant when hovered over in dark mode. - button_secondary_border_color: The border color of a button of default "secondary" variant. - button_secondary_border_color_dark: The border color of a button of default "secondary" variant in dark mode. - button_secondary_border_color_hover: The border color of a button of default "secondary" variant when hovered over. - button_secondary_border_color_hover_dark: The border color of a button of default "secondary" variant when hovered over in dark mode. - button_secondary_text_color: The text color of a button of default "secondary" variant. - button_secondary_text_color_dark: The text color of a button of default "secondary" variant in dark mode. - button_secondary_text_color_hover: The text color of a button of default "secondary" variant when hovered over. - button_secondary_text_color_hover_dark: The text color of a button of default "secondary" variant when hovered over in dark mode. - button_shadow: The shadow under a button. - button_shadow_active: The shadow under a button when pressed. - button_shadow_hover: The shadow under a button when hovered over. - button_small_padding: The padding of a button set to "small" size. - button_small_radius: The corner radius of a button set to "small" size. - button_small_text_size: The text size of a button set to "small" size. - button_small_text_weight: The text weight of a button set to "small" size. - button_transition: The transition animation duration of a button between regular, hover, and focused states. 
- """ - - # Body - self.body_background_fill = body_background_fill or getattr( - self, "body_background_fill", "*background_fill_primary" - ) - self.body_background_fill_dark = body_background_fill_dark or getattr( - self, "body_background_fill_dark", "*background_fill_primary" - ) - self.body_text_color = body_text_color or getattr( - self, "body_text_color", "*neutral_800" - ) - self.body_text_color_dark = body_text_color_dark or getattr( - self, "body_text_color_dark", "*neutral_100" - ) - self.body_text_size = body_text_size or getattr( - self, "body_text_size", "*text_md" - ) - self.body_text_weight = body_text_weight or getattr( - self, "body_text_weight", "400" - ) - self.embed_radius = embed_radius or getattr(self, "embed_radius", "*radius_lg") - # Core Colors - self.color_accent = color_accent or getattr( - self, "color_accent", "*primary_500" - ) - self.color_accent_soft = color_accent_soft or getattr( - self, "color_accent_soft", "*primary_50" - ) - self.color_accent_soft_dark = color_accent_soft_dark or getattr( - self, "color_accent_soft_dark", "*neutral_700" - ) - self.background_fill_primary = background_fill_primary or getattr( - self, "background_primary", "white" - ) - self.background_fill_primary_dark = background_fill_primary_dark or getattr( - self, "background_primary_dark", "*neutral_950" - ) - self.background_fill_secondary = background_fill_secondary or getattr( - self, "background_secondary", "*neutral_50" - ) - self.background_fill_secondary_dark = background_fill_secondary_dark or getattr( - self, "background_secondary_dark", "*neutral_900" - ) - self.border_color_accent = border_color_accent or getattr( - self, "border_color_accent", "*primary_300" - ) - self.border_color_accent_dark = border_color_accent_dark or getattr( - self, "border_color_accent_dark", "*neutral_600" - ) - self.border_color_primary = border_color_primary or getattr( - self, "border_color_primary", "*neutral_200" - ) - self.border_color_primary_dark = border_color_primary_dark or getattr( - self, "border_color_primary_dark", "*neutral_700" - ) - # Text Colors - self.link_text_color = link_text_color or getattr( - self, "link_text_color", "*secondary_600" - ) - self.link_text_color_active = link_text_color_active or getattr( - self, "link_text_color_active", "*secondary_600" - ) - self.link_text_color_active_dark = link_text_color_active_dark or getattr( - self, "link_text_color_active_dark", "*secondary_500" - ) - self.link_text_color_dark = link_text_color_dark or getattr( - self, "link_text_color_dark", "*secondary_500" - ) - self.link_text_color_hover = link_text_color_hover or getattr( - self, "link_text_color_hover", "*secondary_700" - ) - self.link_text_color_hover_dark = link_text_color_hover_dark or getattr( - self, "link_text_color_hover_dark", "*secondary_400" - ) - self.link_text_color_visited = link_text_color_visited or getattr( - self, "link_text_color_visited", "*secondary_500" - ) - self.link_text_color_visited_dark = link_text_color_visited_dark or getattr( - self, "link_text_color_visited_dark", "*secondary_600" - ) - self.body_text_color_subdued = body_text_color_subdued or getattr( - self, "body_text_color_subdued", "*neutral_400" - ) - self.body_text_color_subdued_dark = body_text_color_subdued_dark or getattr( - self, "body_text_color_subdued_dark", "*neutral_400" - ) - # Shadows - self.shadow_drop = shadow_drop or getattr( - self, "shadow_drop", "rgba(0,0,0,0.05) 0px 1px 2px 0px" - ) - self.shadow_drop_lg = shadow_drop_lg or getattr( - self, - "shadow_drop_lg", - "0 
1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)", - ) - self.shadow_inset = shadow_inset or getattr( - self, "shadow_inset", "rgba(0,0,0,0.05) 0px 2px 4px 0px inset" - ) - self.shadow_spread = shadow_spread or getattr(self, "shadow_spread", "3px") - self.shadow_spread_dark = shadow_spread_dark or getattr( - self, "shadow_spread_dark", "1px" - ) - # Layout Atoms - self.block_background_fill = block_background_fill or getattr( - self, "block_background_fill", "*background_fill_primary" - ) - self.block_background_fill_dark = block_background_fill_dark or getattr( - self, "block_background_fill_dark", "*neutral_800" - ) - self.block_border_color = block_border_color or getattr( - self, "block_border_color", "*border_color_primary" - ) - self.block_border_color_dark = block_border_color_dark or getattr( - self, "block_border_color_dark", "*border_color_primary" - ) - self.block_border_width = block_border_width or getattr( - self, "block_border_width", "1px" - ) - self.block_border_width_dark = block_border_width_dark or getattr( - self, "block_border_width_dark", None - ) - self.block_info_text_color = block_info_text_color or getattr( - self, "block_info_text_color", "*body_text_color_subdued" - ) - self.block_info_text_color_dark = block_info_text_color_dark or getattr( - self, "block_info_text_color_dark", "*body_text_color_subdued" - ) - self.block_info_text_size = block_info_text_size or getattr( - self, "block_info_text_size", "*text_sm" - ) - self.block_info_text_weight = block_info_text_weight or getattr( - self, "block_info_text_weight", "400" - ) - self.block_label_background_fill = block_label_background_fill or getattr( - self, "block_label_background_fill", "*background_fill_primary" - ) - self.block_label_background_fill_dark = ( - block_label_background_fill_dark - or getattr( - self, "block_label_background_fill_dark", "*background_fill_secondary" - ) - ) - self.block_label_border_color = block_label_border_color or getattr( - self, "block_label_border_color", "*border_color_primary" - ) - self.block_label_border_color_dark = block_label_border_color_dark or getattr( - self, "block_label_border_color_dark", "*border_color_primary" - ) - self.block_label_border_width = block_label_border_width or getattr( - self, "block_label_border_width", "1px" - ) - self.block_label_border_width_dark = block_label_border_width_dark or getattr( - self, "block_label_border_width_dark", None - ) - self.block_label_shadow = block_label_shadow or getattr( - self, "block_label_shadow", "*block_shadow" - ) - self.block_label_text_color = block_label_text_color or getattr( - self, "block_label_text_color", "*neutral_500" - ) - self.block_label_text_color_dark = block_label_text_color_dark or getattr( - self, "block_label_text_color_dark", "*neutral_200" - ) - self.block_label_margin = block_label_margin or getattr( - self, "block_label_margin", "0" - ) - self.block_label_padding = block_label_padding or getattr( - self, "block_label_padding", "*spacing_sm *spacing_lg" - ) - self.block_label_radius = block_label_radius or getattr( - self, - "block_label_radius", - "calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0", - ) - self.block_label_right_radius = block_label_right_radius or getattr( - self, - "block_label_right_radius", - "0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)", - ) - self.block_label_text_size = block_label_text_size or getattr( - self, "block_label_text_size", "*text_sm" - ) - self.block_label_text_weight = block_label_text_weight or getattr( - self, 
"block_label_text_weight", "400" - ) - self.block_padding = block_padding or getattr( - self, "block_padding", "*spacing_xl calc(*spacing_xl + 2px)" - ) - self.block_radius = block_radius or getattr(self, "block_radius", "*radius_lg") - self.block_shadow = block_shadow or getattr(self, "block_shadow", "none") - self.block_shadow_dark = block_shadow_dark or getattr( - self, "block_shadow_dark", None - ) - self.block_title_background_fill = block_title_background_fill or getattr( - self, "block_title_background_fill", "none" - ) - self.block_title_background_fill_dark = ( - block_title_background_fill_dark - or getattr(self, "block_title_background_fill_dark", None) - ) - self.block_title_border_color = block_title_border_color or getattr( - self, "block_title_border_color", "none" - ) - self.block_title_border_color_dark = block_title_border_color_dark or getattr( - self, "block_title_border_color_dark", None - ) - self.block_title_border_width = block_title_border_width or getattr( - self, "block_title_border_width", "0px" - ) - self.block_title_border_width_dark = block_title_border_width_dark or getattr( - self, "block_title_border_width_dark", None - ) - self.block_title_text_color = block_title_text_color or getattr( - self, "block_title_text_color", "*neutral_500" - ) - self.block_title_text_color_dark = block_title_text_color_dark or getattr( - self, "block_title_text_color_dark", "*neutral_200" - ) - self.block_title_padding = block_title_padding or getattr( - self, "block_title_padding", "0" - ) - self.block_title_radius = block_title_radius or getattr( - self, "block_title_radius", "none" - ) - self.block_title_text_size = block_title_text_size or getattr( - self, "block_title_text_size", "*text_md" - ) - self.block_title_text_weight = block_title_text_weight or getattr( - self, "block_title_text_weight", "400" - ) - self.container_radius = container_radius or getattr( - self, "container_radius", "*radius_lg" - ) - self.form_gap_width = form_gap_width or getattr(self, "form_gap_width", "0px") - self.layout_gap = layout_gap or getattr(self, "layout_gap", "*spacing_xxl") - self.panel_background_fill = panel_background_fill or getattr( - self, "panel_background_fill", "*background_fill_secondary" - ) - self.panel_background_fill_dark = panel_background_fill_dark or getattr( - self, "panel_background_fill_dark", "*background_fill_secondary" - ) - self.panel_border_color = panel_border_color or getattr( - self, "panel_border_color", "*border_color_primary" - ) - self.panel_border_color_dark = panel_border_color_dark or getattr( - self, "panel_border_color_dark", "*border_color_primary" - ) - self.panel_border_width = panel_border_width or getattr( - self, "panel_border_width", "0" - ) - self.panel_border_width_dark = panel_border_width_dark or getattr( - self, "panel_border_width_dark", None - ) - self.section_header_text_size = section_header_text_size or getattr( - self, "section_header_text_size", "*text_md" - ) - self.section_header_text_weight = section_header_text_weight or getattr( - self, "section_header_text_weight", "400" - ) - self.border_color_accent_subdued = border_color_accent_subdued or getattr( - self, "border_color_accent_subdued", "*border_color_accent" - ) - self.border_color_accent_subdued_dark = ( - border_color_accent_subdued_dark - or getattr(self, "border_color_accent_subdued_dark", "*border_color_accent") - ) - # Component Atoms - self.code_background_fill = code_background_fill or getattr( - self, "code_background_fill", "*neutral_100" - ) - 
self.code_background_fill_dark = code_background_fill_dark or getattr( - self, "code_background_fill_dark", "*neutral_800" - ) - self.checkbox_background_color = checkbox_background_color or getattr( - self, "checkbox_background_color", "*background_fill_primary" - ) - self.checkbox_background_color_dark = checkbox_background_color_dark or getattr( - self, "checkbox_background_color_dark", "*neutral_800" - ) - self.checkbox_background_color_focus = ( - checkbox_background_color_focus - or getattr( - self, "checkbox_background_color_focus", "*checkbox_background_color" - ) - ) - self.checkbox_background_color_focus_dark = ( - checkbox_background_color_focus_dark - or getattr( - self, - "checkbox_background_color_focus_dark", - "*checkbox_background_color", - ) - ) - self.checkbox_background_color_hover = ( - checkbox_background_color_hover - or getattr( - self, "checkbox_background_color_hover", "*checkbox_background_color" - ) - ) - self.checkbox_background_color_hover_dark = ( - checkbox_background_color_hover_dark - or getattr( - self, - "checkbox_background_color_hover_dark", - "*checkbox_background_color", - ) - ) - self.checkbox_background_color_selected = ( - checkbox_background_color_selected - or getattr(self, "checkbox_background_color_selected", "*secondary_600") - ) - self.checkbox_background_color_selected_dark = ( - checkbox_background_color_selected_dark - or getattr( - self, "checkbox_background_color_selected_dark", "*secondary_600" - ) - ) - self.checkbox_border_color = checkbox_border_color or getattr( - self, "checkbox_border_color", "*neutral_300" - ) - self.checkbox_border_color_dark = checkbox_border_color_dark or getattr( - self, "checkbox_border_color_dark", "*neutral_700" - ) - self.checkbox_border_color_focus = checkbox_border_color_focus or getattr( - self, "checkbox_border_color_focus", "*secondary_500" - ) - self.checkbox_border_color_focus_dark = ( - checkbox_border_color_focus_dark - or getattr(self, "checkbox_border_color_focus_dark", "*secondary_500") - ) - self.checkbox_border_color_hover = checkbox_border_color_hover or getattr( - self, "checkbox_border_color_hover", "*neutral_300" - ) - self.checkbox_border_color_hover_dark = ( - checkbox_border_color_hover_dark - or getattr(self, "checkbox_border_color_hover_dark", "*neutral_600") - ) - self.checkbox_border_color_selected = checkbox_border_color_selected or getattr( - self, "checkbox_border_color_selected", "*secondary_600" - ) - self.checkbox_border_color_selected_dark = ( - checkbox_border_color_selected_dark - or getattr(self, "checkbox_border_color_selected_dark", "*secondary_600") - ) - self.checkbox_border_radius = checkbox_border_radius or getattr( - self, "checkbox_border_radius", "*radius_sm" - ) - self.checkbox_border_width = checkbox_border_width or getattr( - self, "checkbox_border_width", "*input_border_width" - ) - self.checkbox_border_width_dark = checkbox_border_width_dark or getattr( - self, "checkbox_border_width_dark", "*input_border_width" - ) - self.checkbox_label_background_fill = checkbox_label_background_fill or getattr( - self, "checkbox_label_background_fill", "*button_secondary_background_fill" - ) - self.checkbox_label_background_fill_dark = ( - checkbox_label_background_fill_dark - or getattr( - self, - "checkbox_label_background_fill_dark", - "*button_secondary_background_fill", - ) - ) - self.checkbox_label_background_fill_hover = ( - checkbox_label_background_fill_hover - or getattr( - self, - "checkbox_label_background_fill_hover", - 
"*button_secondary_background_fill_hover", - ) - ) - self.checkbox_label_background_fill_hover_dark = ( - checkbox_label_background_fill_hover_dark - or getattr( - self, - "checkbox_label_background_fill_hover_dark", - "*button_secondary_background_fill_hover", - ) - ) - self.checkbox_label_background_fill_selected = ( - checkbox_label_background_fill_selected - or getattr( - self, - "checkbox_label_background_fill_selected", - "*checkbox_label_background_fill", - ) - ) - self.checkbox_label_background_fill_selected_dark = ( - checkbox_label_background_fill_selected_dark - or getattr( - self, - "checkbox_label_background_fill_selected_dark", - "*checkbox_label_background_fill", - ) - ) - self.checkbox_label_border_color = checkbox_label_border_color or getattr( - self, "checkbox_label_border_color", "*border_color_primary" - ) - self.checkbox_label_border_color_dark = ( - checkbox_label_border_color_dark - or getattr( - self, "checkbox_label_border_color_dark", "*border_color_primary" - ) - ) - self.checkbox_label_border_color_hover = ( - checkbox_label_border_color_hover - or getattr( - self, - "checkbox_label_border_color_hover", - "*checkbox_label_border_color", - ) - ) - self.checkbox_label_border_color_hover_dark = ( - checkbox_label_border_color_hover_dark - or getattr( - self, - "checkbox_label_border_color_hover_dark", - "*checkbox_label_border_color", - ) - ) - self.checkbox_label_border_width = checkbox_label_border_width or getattr( - self, "checkbox_label_border_width", "*input_border_width" - ) - self.checkbox_label_border_width_dark = ( - checkbox_label_border_width_dark - or getattr(self, "checkbox_label_border_width_dark", "*input_border_width") - ) - self.checkbox_label_gap = checkbox_label_gap or getattr( - self, "checkbox_label_gap", "*spacing_lg" - ) - self.checkbox_label_padding = checkbox_label_padding or getattr( - self, "checkbox_label_padding", "*spacing_md calc(2 * *spacing_md)" - ) - self.checkbox_label_shadow = checkbox_label_shadow or getattr( - self, "checkbox_label_shadow", "none" - ) - self.checkbox_label_text_size = checkbox_label_text_size or getattr( - self, "checkbox_label_text_size", "*text_md" - ) - self.checkbox_label_text_weight = checkbox_label_text_weight or getattr( - self, "checkbox_label_text_weight", "400" - ) - self.checkbox_check = checkbox_check or getattr( - self, - "checkbox_check", - """url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e")""", - ) - self.radio_circle = radio_circle or getattr( - self, - "radio_circle", - """url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e")""", - ) - self.checkbox_shadow = checkbox_shadow or getattr( - self, "checkbox_shadow", "*input_shadow" - ) - self.checkbox_label_text_color = checkbox_label_text_color or getattr( - self, "checkbox_label_text_color", "*body_text_color" - ) - self.checkbox_label_text_color_dark = checkbox_label_text_color_dark or getattr( - self, "checkbox_label_text_color_dark", "*body_text_color" - ) - self.checkbox_label_text_color_selected = ( - checkbox_label_text_color_selected - or getattr( - self, "checkbox_label_text_color_selected", "*checkbox_label_text_color" - ) - ) - self.checkbox_label_text_color_selected_dark = ( - checkbox_label_text_color_selected_dark - or getattr( - self, - 
"checkbox_label_text_color_selected_dark", - "*checkbox_label_text_color", - ) - ) - self.error_background_fill = error_background_fill or getattr( - self, "error_background_fill", colors.red.c50 - ) - self.error_background_fill_dark = error_background_fill_dark or getattr( - self, "error_background_fill_dark", "*background_fill_primary" - ) - self.error_border_color = error_border_color or getattr( - self, "error_border_color", colors.red.c700 - ) - self.error_border_color_dark = error_border_color_dark or getattr( - self, "error_border_color_dark", colors.red.c500 - ) - self.error_border_width = error_border_width or getattr( - self, "error_border_width", "1px" - ) - self.error_border_width_dark = error_border_width_dark or getattr( - self, "error_border_width_dark", None - ) - self.error_text_color = error_text_color or getattr( - self, "error_text_color", colors.red.c700 - ) - self.error_text_color_dark = error_text_color_dark or getattr( - self, "error_text_color_dark", colors.red.c50 - ) - self.error_icon_color = error_icon_color or getattr( - self, "error_icon_color", colors.red.c700 - ) - self.error_icon_color_dark = error_icon_color_dark or getattr( - self, "error_icon_color_dark", colors.red.c500 - ) - self.input_background_fill = input_background_fill or getattr( - self, "input_background_fill", "*neutral_100" - ) - self.input_background_fill_dark = input_background_fill_dark or getattr( - self, "input_background_fill_dark", "*neutral_700" - ) - self.input_background_fill_focus = input_background_fill_focus or getattr( - self, "input_background_fill_focus", "*secondary_500" - ) - self.input_background_fill_focus_dark = ( - input_background_fill_focus_dark - or getattr(self, "input_background_fill_focus_dark", "*secondary_600") - ) - self.input_background_fill_hover = input_background_fill_hover or getattr( - self, "input_background_fill_hover", "*input_background_fill" - ) - self.input_background_fill_hover_dark = ( - input_background_fill_hover_dark - or getattr( - self, "input_background_fill_hover_dark", "*input_background_fill" - ) - ) - self.input_border_color = input_border_color or getattr( - self, "input_border_color", "*border_color_primary" - ) - self.input_border_color_dark = input_border_color_dark or getattr( - self, "input_border_color_dark", "*border_color_primary" - ) - self.input_border_color_focus = input_border_color_focus or getattr( - self, "input_border_color_focus", "*secondary_300" - ) - self.input_border_color_focus_dark = input_border_color_focus_dark or getattr( - self, "input_border_color_focus_dark", "*neutral_700" - ) - self.input_border_color_hover = input_border_color_hover or getattr( - self, "input_border_color_hover", "*input_border_color" - ) - self.input_border_color_hover_dark = input_border_color_hover_dark or getattr( - self, "input_border_color_hover_dark", "*input_border_color" - ) - self.input_border_width = input_border_width or getattr( - self, "input_border_width", "0px" - ) - self.input_border_width_dark = input_border_width_dark or getattr( - self, "input_border_width_dark", None - ) - self.input_padding = input_padding or getattr( - self, "input_padding", "*spacing_xl" - ) - self.input_placeholder_color = input_placeholder_color or getattr( - self, "input_placeholder_color", "*neutral_400" - ) - self.input_placeholder_color_dark = input_placeholder_color_dark or getattr( - self, "input_placeholder_color_dark", "*neutral_500" - ) - self.input_radius = input_radius or getattr(self, "input_radius", "*radius_lg") - self.input_shadow = 
input_shadow or getattr(self, "input_shadow", "none") - self.input_shadow_dark = input_shadow_dark or getattr( - self, "input_shadow_dark", None - ) - self.input_shadow_focus = input_shadow_focus or getattr( - self, "input_shadow_focus", "*input_shadow" - ) - self.input_shadow_focus_dark = input_shadow_focus_dark or getattr( - self, "input_shadow_focus_dark", None - ) - self.input_text_size = input_text_size or getattr( - self, "input_text_size", "*text_md" - ) - self.input_text_weight = input_text_weight or getattr( - self, "input_text_weight", "400" - ) - self.loader_color = loader_color or getattr( - self, "loader_color", "*color_accent" - ) - self.loader_color_dark = loader_color_dark or getattr( - self, "loader_color_dark", None - ) - self.prose_text_size = prose_text_size or getattr( - self, "prose_text_size", "*text_md" - ) - self.prose_text_weight = prose_text_weight or getattr( - self, "prose_text_weight", "400" - ) - self.prose_header_text_weight = prose_header_text_weight or getattr( - self, "prose_header_text_weight", "600" - ) - self.slider_color = slider_color or getattr( - self, "slider_color", colors.blue.c600 - ) - self.slider_color_dark = slider_color_dark or getattr( - self, "slider_color_dark", None - ) - self.stat_background_fill = stat_background_fill or getattr( - self, "stat_background_fill", "*primary_300" - ) - self.stat_background_fill_dark = stat_background_fill_dark or getattr( - self, "stat_background_fill_dark", "*primary_500" - ) - self.table_border_color = table_border_color or getattr( - self, "table_border_color", "*neutral_300" - ) - self.table_border_color_dark = table_border_color_dark or getattr( - self, "table_border_color_dark", "*neutral_700" - ) - self.table_even_background_fill = table_even_background_fill or getattr( - self, "table_even_background_fill", "white" - ) - self.table_even_background_fill_dark = ( - table_even_background_fill_dark - or getattr(self, "table_even_background_fill_dark", "*neutral_950") - ) - self.table_odd_background_fill = table_odd_background_fill or getattr( - self, "table_odd_background_fill", "*neutral_50" - ) - self.table_odd_background_fill_dark = table_odd_background_fill_dark or getattr( - self, "table_odd_background_fill_dark", "*neutral_900" - ) - self.table_radius = table_radius or getattr(self, "table_radius", "*radius_lg") - self.table_row_focus = table_row_focus or getattr( - self, "table_row_focus", "*color_accent_soft" - ) - self.table_row_focus_dark = table_row_focus_dark or getattr( - self, "table_row_focus_dark", "*color_accent_soft" - ) - # Buttons - self.button_border_width = button_border_width or getattr( - self, "button_border_width", "*input_border_width" - ) - self.button_border_width_dark = button_border_width_dark or getattr( - self, "button_border_width_dark", "*input_border_width" - ) - self.button_cancel_background_fill = button_cancel_background_fill or getattr( - self, "button_cancel_background_fill", "*button_secondary_background_fill" - ) - self.button_cancel_background_fill_dark = ( - button_cancel_background_fill_dark - or getattr( - self, - "button_cancel_background_fill_dark", - "*button_secondary_background_fill", - ) - ) - self.button_cancel_background_fill_hover = ( - button_cancel_background_fill_hover - or getattr( - self, - "button_cancel_background_fill_hover", - "*button_cancel_background_fill", - ) - ) - self.button_cancel_background_fill_hover_dark = ( - button_cancel_background_fill_hover_dark - or getattr( - self, - "button_cancel_background_fill_hover_dark", - 
"*button_cancel_background_fill", - ) - ) - self.button_cancel_border_color = button_cancel_border_color or getattr( - self, "button_cancel_border_color", "*button_secondary_border_color" - ) - self.button_cancel_border_color_dark = ( - button_cancel_border_color_dark - or getattr( - self, - "button_cancel_border_color_dark", - "*button_secondary_border_color", - ) - ) - self.button_cancel_border_color_hover = ( - button_cancel_border_color_hover - or getattr( - self, - "button_cancel_border_color_hover", - "*button_cancel_border_color", - ) - ) - self.button_cancel_border_color_hover_dark = ( - button_cancel_border_color_hover_dark - or getattr( - self, - "button_cancel_border_color_hover_dark", - "*button_cancel_border_color", - ) - ) - self.button_cancel_text_color = button_cancel_text_color or getattr( - self, "button_cancel_text_color", "*button_secondary_text_color" - ) - self.button_cancel_text_color_dark = button_cancel_text_color_dark or getattr( - self, "button_cancel_text_color_dark", "*button_secondary_text_color" - ) - self.button_cancel_text_color_hover = button_cancel_text_color_hover or getattr( - self, "button_cancel_text_color_hover", "*button_cancel_text_color" - ) - self.button_cancel_text_color_hover_dark = ( - button_cancel_text_color_hover_dark - or getattr( - self, "button_cancel_text_color_hover_dark", "*button_cancel_text_color" - ) - ) - self.button_large_padding = button_large_padding or getattr( - self, "button_large_padding", "*spacing_lg calc(2 * *spacing_lg)" - ) - self.button_large_radius = button_large_radius or getattr( - self, "button_large_radius", "*radius_lg" - ) - self.button_large_text_size = button_large_text_size or getattr( - self, "button_large_text_size", "*text_lg" - ) - self.button_large_text_weight = button_large_text_weight or getattr( - self, "button_large_text_weight", "600" - ) - self.button_primary_background_fill = button_primary_background_fill or getattr( - self, "button_primary_background_fill", "*primary_200" - ) - self.button_primary_background_fill_dark = ( - button_primary_background_fill_dark - or getattr(self, "button_primary_background_fill_dark", "*primary_700") - ) - self.button_primary_background_fill_hover = ( - button_primary_background_fill_hover - or getattr( - self, - "button_primary_background_fill_hover", - "*button_primary_background_fill", - ) - ) - self.button_primary_background_fill_hover_dark = ( - button_primary_background_fill_hover_dark - or getattr( - self, - "button_primary_background_fill_hover_dark", - "*button_primary_background_fill", - ) - ) - self.button_primary_border_color = button_primary_border_color or getattr( - self, "button_primary_border_color", "*primary_200" - ) - self.button_primary_border_color_dark = ( - button_primary_border_color_dark - or getattr(self, "button_primary_border_color_dark", "*primary_600") - ) - self.button_primary_border_color_hover = ( - button_primary_border_color_hover - or getattr( - self, - "button_primary_border_color_hover", - "*button_primary_border_color", - ) - ) - self.button_primary_border_color_hover_dark = ( - button_primary_border_color_hover_dark - or getattr( - self, - "button_primary_border_color_hover_dark", - "*button_primary_border_color", - ) - ) - self.button_primary_text_color = button_primary_text_color or getattr( - self, "button_primary_text_color", "*primary_600" - ) - self.button_primary_text_color_dark = button_primary_text_color_dark or getattr( - self, "button_primary_text_color_dark", "white" - ) - self.button_primary_text_color_hover = 
( - button_primary_text_color_hover - or getattr( - self, "button_primary_text_color_hover", "*button_primary_text_color" - ) - ) - self.button_primary_text_color_hover_dark = ( - button_primary_text_color_hover_dark - or getattr( - self, - "button_primary_text_color_hover_dark", - "*button_primary_text_color", - ) - ) - self.button_secondary_background_fill = ( - button_secondary_background_fill - or getattr(self, "button_secondary_background_fill", "*neutral_200") - ) - self.button_secondary_background_fill_dark = ( - button_secondary_background_fill_dark - or getattr(self, "button_secondary_background_fill_dark", "*neutral_600") - ) - self.button_secondary_background_fill_hover = ( - button_secondary_background_fill_hover - or getattr( - self, - "button_secondary_background_fill_hover", - "*button_secondary_background_fill", - ) - ) - self.button_secondary_background_fill_hover_dark = ( - button_secondary_background_fill_hover_dark - or getattr( - self, - "button_secondary_background_fill_hover_dark", - "*button_secondary_background_fill", - ) - ) - self.button_secondary_border_color = button_secondary_border_color or getattr( - self, "button_secondary_border_color", "*neutral_200" - ) - self.button_secondary_border_color_dark = ( - button_secondary_border_color_dark - or getattr(self, "button_secondary_border_color_dark", "*neutral_600") - ) - self.button_secondary_border_color_hover = ( - button_secondary_border_color_hover - or getattr( - self, - "button_secondary_border_color_hover", - "*button_secondary_border_color", - ) - ) - self.button_secondary_border_color_hover_dark = ( - button_secondary_border_color_hover_dark - or getattr( - self, - "button_secondary_border_color_hover_dark", - "*button_secondary_border_color", - ) - ) - self.button_secondary_text_color = button_secondary_text_color or getattr( - self, "button_secondary_text_color", "*neutral_700" - ) - self.button_secondary_text_color_dark = ( - button_secondary_text_color_dark - or getattr(self, "button_secondary_text_color_dark", "white") - ) - self.button_secondary_text_color_hover = ( - button_secondary_text_color_hover - or getattr( - self, - "button_secondary_text_color_hover", - "*button_secondary_text_color", - ) - ) - self.button_secondary_text_color_hover_dark = ( - button_secondary_text_color_hover_dark - or getattr( - self, - "button_secondary_text_color_hover_dark", - "*button_secondary_text_color", - ) - ) - self.button_shadow = button_shadow or getattr(self, "button_shadow", "none") - self.button_shadow_active = button_shadow_active or getattr( - self, "button_shadow_active", "none" - ) - self.button_shadow_hover = button_shadow_hover or getattr( - self, "button_shadow_hover", "none" - ) - self.button_small_padding = button_small_padding or getattr( - self, "button_small_padding", "*spacing_sm calc(2 * *spacing_sm)" - ) - self.button_small_radius = button_small_radius or getattr( - self, "button_small_radius", "*radius_lg" - ) - self.button_small_text_size = button_small_text_size or getattr( - self, "button_small_text_size", "*text_md" - ) - self.button_small_text_weight = button_small_text_weight or getattr( - self, "button_small_text_weight", "400" - ) - self.button_transition = button_transition or getattr( - self, "button_transition", "background-color 0.2s ease" - ) - return self diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.py 
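(Editor's note on the theme `set()` method that ends above: every assignment follows the same three-way fallback. The explicit keyword argument wins; failing that, a value already present on `self` — for example one set by the theme's constructor or an earlier `set()` call — is kept; failing that, the documented default applies, where defaults beginning with `*` reference other theme variables and are resolved later when the CSS is generated. A minimal sketch of the pattern; the helper name `_resolve` is hypothetical and not part of the file:

    def _resolve(explicit, obj, name, default):
        # keyword argument first, then an inherited attribute, then the default
        # (often a "*variable" reference to another theme setting)
        return explicit or getattr(obj, name, default)

    # e.g. self.input_radius = _resolve(input_radius, self, "input_radius", "*radius_lg")
)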
deleted file mode 100644 index 53ee0844e3c58456807bfd7828bdb9cf58f8ed76..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.py +++ /dev/null @@ -1,555 +0,0 @@ -"""Tests for hermite module. - -""" -from functools import reduce - -import numpy as np -import numpy.polynomial.hermite as herm -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) - -H0 = np.array([1]) -H1 = np.array([0, 2]) -H2 = np.array([-2, 0, 4]) -H3 = np.array([0, -12, 0, 8]) -H4 = np.array([12, 0, -48, 0, 16]) -H5 = np.array([0, 120, 0, -160, 0, 32]) -H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) -H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) -H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) -H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) - -Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] - - -def trim(x): - return herm.hermtrim(x, tol=1e-6) - - -class TestConstants: - - def test_hermdomain(self): - assert_equal(herm.hermdomain, [-1, 1]) - - def test_hermzero(self): - assert_equal(herm.hermzero, [0]) - - def test_hermone(self): - assert_equal(herm.hermone, [1]) - - def test_hermx(self): - assert_equal(herm.hermx, [0, .5]) - - -class TestArithmetic: - x = np.linspace(-3, 3, 100) - - def test_hermadd(self): - for i in range(5): - for j in range(5): - msg = f"At i={i}, j={j}" - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = herm.hermadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermsub(self): - for i in range(5): - for j in range(5): - msg = f"At i={i}, j={j}" - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = herm.hermsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermmulx(self): - assert_equal(herm.hermmulx([0]), [0]) - assert_equal(herm.hermmulx([1]), [0, .5]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, .5] - assert_equal(herm.hermmulx(ser), tgt) - - def test_hermmul(self): - # check values of result - for i in range(5): - pol1 = [0]*i + [1] - val1 = herm.hermval(self.x, pol1) - for j in range(5): - msg = f"At i={i}, j={j}" - pol2 = [0]*j + [1] - val2 = herm.hermval(self.x, pol2) - pol3 = herm.hermmul(pol1, pol2) - val3 = herm.hermval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_hermdiv(self): - for i in range(5): - for j in range(5): - msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = herm.hermadd(ci, cj) - quo, rem = herm.hermdiv(tgt, ci) - res = herm.hermadd(herm.hermmul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermpow(self): - for i in range(5): - for j in range(5): - msg = f"At i={i}, j={j}" - c = np.arange(i + 1) - tgt = reduce(herm.hermmul, [c]*j, np.array([1])) - res = herm.hermpow(c, j) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation: - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([2.5, 1., .75]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_hermval(self): - #check empty input - assert_equal(herm.hermval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Hlist] - for i in 
range(10): - msg = f"At i={i}" - tgt = y[i] - res = herm.hermval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(herm.hermval(x, [1]).shape, dims) - assert_equal(herm.hermval(x, [1, 0]).shape, dims) - assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) - - def test_hermval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = herm.hermval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_hermval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = herm.hermval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_hermgrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = herm.hermgrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermgrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_hermgrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = herm.hermgrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermgrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral: - - def test_hermint(self): - # check exceptions - assert_raises(TypeError, herm.hermint, [0], .5) - assert_raises(ValueError, herm.hermint, [0], -1) - assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) - assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) - assert_raises(ValueError, herm.hermint, [0], scl=[0]) - assert_raises(TypeError, herm.hermint, [0], axis=.5) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = herm.hermint([0], m=i, k=k) - assert_almost_equal(res, [0, .5]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i]) - res = herm.herm2poly(hermint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(herm.hermval(-1, hermint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) - res = herm.herm2poly(hermint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1) - res = herm.hermint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in 
range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1, k=[k]) - res = herm.hermint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) - res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1, k=[k], scl=2) - res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T - res = herm.hermint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herm.hermint(c) for c in c2d]) - res = herm.hermint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) - res = herm.hermint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative: - - def test_hermder(self): - # check exceptions - assert_raises(TypeError, herm.hermder, [0], .5) - assert_raises(ValueError, herm.hermder, [0], -1) - - # check that zeroth derivative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = herm.hermder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = herm.hermder(herm.hermint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T - res = herm.hermder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herm.hermder(c) for c in c2d]) - res = herm.hermder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander: - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_hermvander(self): - # check for 1d x - x = np.arange(3) - v = herm.hermvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], herm.hermval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = herm.hermvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], herm.hermval(x, coef)) - - def test_hermvander2d(self): - # also tests hermval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = herm.hermvander2d(x1, x2, [1, 2]) - tgt = herm.hermval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = herm.hermvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_hermvander3d(self): - # also tests hermval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) - tgt = herm.hermval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - 
assert_almost_equal(res, tgt) - - # check shape - van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting: - - def test_hermfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - def f2(x): - return x**4 + x**2 + 1 - - # Test exceptions - assert_raises(ValueError, herm.hermfit, [1], [1], -1) - assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) - assert_raises(TypeError, herm.hermfit, [], [1], 0) - assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) - assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) - assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) - assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) - assert_raises(ValueError, herm.hermfit, [1], [1], [-1,]) - assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6]) - assert_raises(TypeError, herm.hermfit, [1], [1], []) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = herm.hermfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(herm.hermval(x, coef3), y) - coef3 = herm.hermfit(x, y, [0, 1, 2, 3]) - assert_equal(len(coef3), 4) - assert_almost_equal(herm.hermval(x, coef3), y) - # - coef4 = herm.hermfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(herm.hermval(x, coef4), y) - coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4]) - assert_equal(len(coef4), 5) - assert_almost_equal(herm.hermval(x, coef4), y) - # check things still work if deg is not in strict increasing - coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0]) - assert_equal(len(coef4), 5) - assert_almost_equal(herm.hermval(x, coef4), y) - # - coef2d = herm.hermfit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3]) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = herm.hermfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. - x = [1, 1j, -1, -1j] - assert_almost_equal(herm.hermfit(x, x, 1), [0, .5]) - assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5]) - # test fitting only even Legendre polynomials - x = np.linspace(-1, 1) - y = f2(x) - coef1 = herm.hermfit(x, y, 4) - assert_almost_equal(herm.hermval(x, coef1), y) - coef2 = herm.hermfit(x, y, [0, 2, 4]) - assert_almost_equal(herm.hermval(x, coef2), y) - assert_almost_equal(coef1, coef2) - - -class TestCompanion: - - def test_raises(self): - assert_raises(ValueError, herm.hermcompanion, []) - assert_raises(ValueError, herm.hermcompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(herm.hermcompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(herm.hermcompanion([1, 2])[0, 0] == -.25) - - -class TestGauss: - - def test_100(self): - x, w = herm.hermgauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. 
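# (Editorial aside, not from the deleted file: the check below builds
#  vv[i, j] = sum_k w[k] * H_i(x[k]) * H_j(x[k]), which Gauss-Hermite
#  orthogonality makes diagonal; rescaling both sides by 1/sqrt(diag(vv))
#  then yields the identity matrix that the assertion compares against.)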
- v = herm.hermvander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = np.sqrt(np.pi) - assert_almost_equal(w.sum(), tgt) - - -class TestMisc: - - def test_hermfromroots(self): - res = herm.hermfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = herm.hermfromroots(roots) - res = herm.hermval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(herm.herm2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_hermroots(self): - assert_almost_equal(herm.hermroots([1]), []) - assert_almost_equal(herm.hermroots([1, 1]), [-.5]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = herm.hermroots(herm.hermfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermtrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, herm.hermtrim, coef, -1) - - # Test results - assert_equal(herm.hermtrim(coef), coef[:-1]) - assert_equal(herm.hermtrim(coef, 1), coef[:-3]) - assert_equal(herm.hermtrim(coef, 2), [0]) - - def test_hermline(self): - assert_equal(herm.hermline(3, 4), [3, 2]) - - def test_herm2poly(self): - for i in range(10): - assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) - - def test_poly2herm(self): - for i in range(10): - assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(-5, 5, 11) - tgt = np.exp(-x**2) - res = herm.hermweight(x) - assert_almost_equal(res, tgt) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/flags.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/flags.py deleted file mode 100644 index 038132f99c82ee17b4b415f2d55e91ac7a8af528..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/flags.py +++ /dev/null @@ -1,119 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING -import weakref - -if TYPE_CHECKING: - from pandas.core.generic import NDFrame - - -class Flags: - """ - Flags that apply to pandas objects. - - .. versionadded:: 1.2.0 - - Parameters - ---------- - obj : Series or DataFrame - The object these flags are associated with. - allows_duplicate_labels : bool, default True - Whether to allow duplicate labels in this object. By default, - duplicate labels are permitted. Setting this to ``False`` will - cause an :class:`errors.DuplicateLabelError` to be raised when - `index` (or columns for DataFrame) is not unique, or any - subsequent operation on introduces duplicates. - See :ref:`duplicates.disallow` for more. - - .. warning:: - - This is an experimental feature. Currently, many methods fail to - propagate the ``allows_duplicate_labels`` value. In future versions - it is expected that every method taking or returning one or more - DataFrame or Series objects will propagate ``allows_duplicate_labels``. 
- - Examples - -------- - Attributes can be set in two ways: - - >>> df = pd.DataFrame() - >>> df.flags - <Flags(allows_duplicate_labels=True)> - >>> df.flags.allows_duplicate_labels = False - >>> df.flags - <Flags(allows_duplicate_labels=False)> - - >>> df.flags['allows_duplicate_labels'] = True - >>> df.flags - <Flags(allows_duplicate_labels=True)> - """ - - _keys: set[str] = {"allows_duplicate_labels"} - - def __init__(self, obj: NDFrame, *, allows_duplicate_labels: bool) -> None: - self._allows_duplicate_labels = allows_duplicate_labels - self._obj = weakref.ref(obj) - - @property - def allows_duplicate_labels(self) -> bool: - """ - Whether this object allows duplicate labels. - - Setting ``allows_duplicate_labels=False`` ensures that the - index (and columns of a DataFrame) are unique. Most methods - that accept and return a Series or DataFrame will propagate - the value of ``allows_duplicate_labels``. - - See :ref:`duplicates` for more. - - See Also - -------- - DataFrame.attrs : Set global metadata on this object. - DataFrame.set_flags : Set global flags on this object. - - Examples - -------- - >>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a']) - >>> df.flags.allows_duplicate_labels - True - >>> df.flags.allows_duplicate_labels = False - Traceback (most recent call last): - ... - pandas.errors.DuplicateLabelError: Index has duplicates. - positions - label - a [0, 1] - """ - return self._allows_duplicate_labels - - @allows_duplicate_labels.setter - def allows_duplicate_labels(self, value: bool) -> None: - value = bool(value) - obj = self._obj() - if obj is None: - raise ValueError("This flag's object has been deleted.") - - if not value: - for ax in obj.axes: - ax._maybe_check_unique() - - self._allows_duplicate_labels = value - - def __getitem__(self, key: str): - if key not in self._keys: - raise KeyError(key) - - return getattr(self, key) - - def __setitem__(self, key: str, value) -> None: - if key not in self._keys: - raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}") - setattr(self, key, value) - - def __repr__(self) -> str: - return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>" - - def __eq__(self, other) -> bool: - if isinstance(other, type(self)): - return self.allows_duplicate_labels == other.allows_duplicate_labels - return False diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rank.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rank.py deleted file mode 100644 index b5b5e42691e59c0b63c8b31fb7a6d0fbab307c4c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rank.py +++ /dev/null @@ -1,502 +0,0 @@ -from datetime import ( - datetime, - timedelta, -) - -import numpy as np -import pytest - -from pandas._libs.algos import ( - Infinity, - NegInfinity, -) - -from pandas import ( - DataFrame, - Series, -) -import pandas._testing as tm - - -class TestRank: - s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]) - df = DataFrame({"A": s, "B": s}) - - results = { - "average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]), - "min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]), - "max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]), - "first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]), - "dense": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]), - } - - @pytest.fixture(params=["average", "min", "max", "first", "dense"]) - def method(self, request): - """ - Fixture for trying all rank methods - """ - return request.param - - def test_rank(self, float_frame): - sp_stats = pytest.importorskip("scipy.stats") - - float_frame.loc[::2, "A"] = np.nan - float_frame.loc[::3, "B"] = np.nan - float_frame.loc[::4, "C"] = np.nan - float_frame.loc[::5, "D"] = np.nan - - ranks0 = float_frame.rank() - ranks1 = float_frame.rank(1) - mask = np.isnan(float_frame.values) - - fvals = float_frame.fillna(np.inf).values - - exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fvals) - exp0[mask] = np.nan - - exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fvals) - exp1[mask] = np.nan - - tm.assert_almost_equal(ranks0.values, exp0) - tm.assert_almost_equal(ranks1.values, exp1) - - # integers - df = DataFrame( - np.random.default_rng(2).integers(0, 5, size=40).reshape((10, 4)) - ) - - result = df.rank() - exp = df.astype(float).rank() - tm.assert_frame_equal(result, exp) - - result = df.rank(1) - exp = df.astype(float).rank(1) - tm.assert_frame_equal(result, exp) - - def test_rank2(self): - df = DataFrame([[1, 3, 2], [1, 2, 3]]) - expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0 - result = df.rank(1, pct=True) - tm.assert_frame_equal(result, expected) - - df = DataFrame([[1, 3, 2], [1, 2, 3]]) - expected = df.rank(0) / 2.0 - result = df.rank(0, pct=True) - tm.assert_frame_equal(result, expected) - - df = DataFrame([["b", "c", "a"], ["a", "c", "b"]]) - expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]]) - result = df.rank(1, numeric_only=False) - tm.assert_frame_equal(result, expected) - - expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]]) - result = df.rank(0, numeric_only=False) - tm.assert_frame_equal(result, expected) - - df = DataFrame([["b", np.nan, "a"], ["a", "c", "b"]]) - expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 3.0, 2.0]]) - result = df.rank(1, numeric_only=False) - tm.assert_frame_equal(result, expected) - - expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 1.0, 2.0]]) - result = df.rank(0, numeric_only=False) -
tm.assert_frame_equal(result, expected) - - # f7u12, this does not work without extensive workaround - data = [ - [datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)], - [datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)], - ] - df = DataFrame(data) - - # check the rank - expected = DataFrame([[2.0, np.nan, 1.0], [2.0, 3.0, 1.0]]) - result = df.rank(1, numeric_only=False, ascending=True) - tm.assert_frame_equal(result, expected) - - expected = DataFrame([[1.0, np.nan, 2.0], [2.0, 1.0, 3.0]]) - result = df.rank(1, numeric_only=False, ascending=False) - tm.assert_frame_equal(result, expected) - - df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10, 1e60, 1e80, 1e-30]}) - exp = DataFrame({"a": [3.5, 1.0, 3.5, 5.0, 6.0, 7.0, 2.0]}) - tm.assert_frame_equal(df.rank(), exp) - - def test_rank_does_not_mutate(self): - # GH#18521 - # Check rank does not mutate DataFrame - df = DataFrame( - np.random.default_rng(2).standard_normal((10, 3)), dtype="float64" - ) - expected = df.copy() - df.rank() - result = df - tm.assert_frame_equal(result, expected) - - def test_rank_mixed_frame(self, float_string_frame): - float_string_frame["datetime"] = datetime.now() - float_string_frame["timedelta"] = timedelta(days=1, seconds=1) - - float_string_frame.rank(numeric_only=False) - with pytest.raises(TypeError, match="not supported between instances of"): - float_string_frame.rank(axis=1) - - def test_rank_na_option(self, float_frame): - sp_stats = pytest.importorskip("scipy.stats") - - float_frame.loc[::2, "A"] = np.nan - float_frame.loc[::3, "B"] = np.nan - float_frame.loc[::4, "C"] = np.nan - float_frame.loc[::5, "D"] = np.nan - - # bottom - ranks0 = float_frame.rank(na_option="bottom") - ranks1 = float_frame.rank(1, na_option="bottom") - - fvals = float_frame.fillna(np.inf).values - - exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fvals) - exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fvals) - - tm.assert_almost_equal(ranks0.values, exp0) - tm.assert_almost_equal(ranks1.values, exp1) - - # top - ranks0 = float_frame.rank(na_option="top") - ranks1 = float_frame.rank(1, na_option="top") - - fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values - fval1 = float_frame.T - fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T - fval1 = fval1.fillna(np.inf).values - - exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fval0) - exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fval1) - - tm.assert_almost_equal(ranks0.values, exp0) - tm.assert_almost_equal(ranks1.values, exp1) - - # descending - - # bottom - ranks0 = float_frame.rank(na_option="top", ascending=False) - ranks1 = float_frame.rank(1, na_option="top", ascending=False) - - fvals = float_frame.fillna(np.inf).values - - exp0 = np.apply_along_axis(sp_stats.rankdata, 0, -fvals) - exp1 = np.apply_along_axis(sp_stats.rankdata, 1, -fvals) - - tm.assert_almost_equal(ranks0.values, exp0) - tm.assert_almost_equal(ranks1.values, exp1) - - # descending - - # top - ranks0 = float_frame.rank(na_option="bottom", ascending=False) - ranks1 = float_frame.rank(1, na_option="bottom", ascending=False) - - fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values - fval1 = float_frame.T - fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T - fval1 = fval1.fillna(np.inf).values - - exp0 = np.apply_along_axis(sp_stats.rankdata, 0, -fval0) - exp1 = np.apply_along_axis(sp_stats.rankdata, 1, -fval1) - - tm.assert_numpy_array_equal(ranks0.values, exp0) - tm.assert_numpy_array_equal(ranks1.values, exp1) - - # bad values throw error - msg = 
"na_option must be one of 'keep', 'top', or 'bottom'" - - with pytest.raises(ValueError, match=msg): - float_frame.rank(na_option="bad", ascending=False) - - # invalid type - with pytest.raises(ValueError, match=msg): - float_frame.rank(na_option=True, ascending=False) - - def test_rank_axis(self): - # check if using axes' names gives the same result - df = DataFrame([[2, 1], [4, 3]]) - tm.assert_frame_equal(df.rank(axis=0), df.rank(axis="index")) - tm.assert_frame_equal(df.rank(axis=1), df.rank(axis="columns")) - - @pytest.mark.parametrize("ax", [0, 1]) - @pytest.mark.parametrize("m", ["average", "min", "max", "first", "dense"]) - def test_rank_methods_frame(self, ax, m): - sp_stats = pytest.importorskip("scipy.stats") - - xs = np.random.default_rng(2).integers(0, 21, (100, 26)) - xs = (xs - 10.0) / 10.0 - cols = [chr(ord("z") - i) for i in range(xs.shape[1])] - - for vals in [xs, xs + 1e6, xs * 1e-6]: - df = DataFrame(vals, columns=cols) - - result = df.rank(axis=ax, method=m) - sprank = np.apply_along_axis( - sp_stats.rankdata, ax, vals, m if m != "first" else "ordinal" - ) - sprank = sprank.astype(np.float64) - expected = DataFrame(sprank, columns=cols).astype("float64") - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("dtype", ["O", "f8", "i8"]) - def test_rank_descending(self, method, dtype): - if "i" in dtype: - df = self.df.dropna().astype(dtype) - else: - df = self.df.astype(dtype) - - res = df.rank(ascending=False) - expected = (df.max() - df).rank() - tm.assert_frame_equal(res, expected) - - expected = (df.max() - df).rank(method=method) - - if dtype != "O": - res2 = df.rank(method=method, ascending=False, numeric_only=True) - tm.assert_frame_equal(res2, expected) - - res3 = df.rank(method=method, ascending=False, numeric_only=False) - tm.assert_frame_equal(res3, expected) - - @pytest.mark.parametrize("axis", [0, 1]) - @pytest.mark.parametrize("dtype", [None, object]) - def test_rank_2d_tie_methods(self, method, axis, dtype): - df = self.df - - def _check2d(df, expected, method="average", axis=0): - exp_df = DataFrame({"A": expected, "B": expected}) - - if axis == 1: - df = df.T - exp_df = exp_df.T - - result = df.rank(method=method, axis=axis) - tm.assert_frame_equal(result, exp_df) - - frame = df if dtype is None else df.astype(dtype) - _check2d(frame, self.results[method], method=method, axis=axis) - - @pytest.mark.parametrize( - "method,exp", - [ - ("dense", [[1.0, 1.0, 1.0], [1.0, 0.5, 2.0 / 3], [1.0, 0.5, 1.0 / 3]]), - ( - "min", - [ - [1.0 / 3, 1.0, 1.0], - [1.0 / 3, 1.0 / 3, 2.0 / 3], - [1.0 / 3, 1.0 / 3, 1.0 / 3], - ], - ), - ( - "max", - [[1.0, 1.0, 1.0], [1.0, 2.0 / 3, 2.0 / 3], [1.0, 2.0 / 3, 1.0 / 3]], - ), - ( - "average", - [[2.0 / 3, 1.0, 1.0], [2.0 / 3, 0.5, 2.0 / 3], [2.0 / 3, 0.5, 1.0 / 3]], - ), - ( - "first", - [ - [1.0 / 3, 1.0, 1.0], - [2.0 / 3, 1.0 / 3, 2.0 / 3], - [3.0 / 3, 2.0 / 3, 1.0 / 3], - ], - ), - ], - ) - def test_rank_pct_true(self, method, exp): - # see gh-15630. 
- - df = DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]]) - result = df.rank(method=method, pct=True) - - expected = DataFrame(exp) - tm.assert_frame_equal(result, expected) - - @pytest.mark.single_cpu - def test_pct_max_many_rows(self): - # GH 18271 - df = DataFrame( - {"A": np.arange(2**24 + 1), "B": np.arange(2**24 + 1, 0, -1)} - ) - result = df.rank(pct=True).max() - assert (result == 1).all() - - @pytest.mark.parametrize( - "contents,dtype", - [ - ( - [ - -np.inf, - -50, - -1, - -1e-20, - -1e-25, - -1e-50, - 0, - 1e-40, - 1e-20, - 1e-10, - 2, - 40, - np.inf, - ], - "float64", - ), - ( - [ - -np.inf, - -50, - -1, - -1e-20, - -1e-25, - -1e-45, - 0, - 1e-40, - 1e-20, - 1e-10, - 2, - 40, - np.inf, - ], - "float32", - ), - ([np.iinfo(np.uint8).min, 1, 2, 100, np.iinfo(np.uint8).max], "uint8"), - ( - [ - np.iinfo(np.int64).min, - -100, - 0, - 1, - 9999, - 100000, - 1e10, - np.iinfo(np.int64).max, - ], - "int64", - ), - ([NegInfinity(), "1", "A", "BA", "Ba", "C", Infinity()], "object"), - ( - [datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 5)], - "datetime64", - ), - ], - ) - def test_rank_inf_and_nan(self, contents, dtype, frame_or_series): - dtype_na_map = { - "float64": np.nan, - "float32": np.nan, - "object": None, - "datetime64": np.datetime64("nat"), - } - # Insert NaNs at random positions if the underlying dtype has a missing - # value, then adjust the expected order by inserting NaNs accordingly. - # This tests whether the rank calculation is affected when values are - # intertwined with NaN values. - values = np.array(contents, dtype=dtype) - exp_order = np.array(range(len(values)), dtype="float64") + 1.0 - if dtype in dtype_na_map: - na_value = dtype_na_map[dtype] - nan_indices = np.random.default_rng(2).choice(range(len(values)), 5) - values = np.insert(values, nan_indices, na_value) - exp_order = np.insert(exp_order, nan_indices, np.nan) - - # Shuffle the testing array and expected results in the same way - random_order = np.random.default_rng(2).permutation(len(values)) - obj = frame_or_series(values[random_order]) - expected = frame_or_series(exp_order[random_order], dtype="float64") - result = obj.rank() - tm.assert_equal(result, expected) - - def test_df_series_inf_nan_consistency(self): - # GH#32593 - index = [5, 4, 3, 2, 1, 6, 7, 8, 9, 10] - col1 = [5, 4, 3, 5, 8, 5, 2, 1, 6, 6] - col2 = [5, 4, np.nan, 5, 8, 5, np.inf, np.nan, 6, -np.inf] - df = DataFrame( - data={ - "col1": col1, - "col2": col2, - }, - index=index, - dtype="f8", - ) - df_result = df.rank() - - series_result = df.copy() - series_result["col1"] = df["col1"].rank() - series_result["col2"] = df["col2"].rank() - - tm.assert_frame_equal(df_result, series_result) - - def test_rank_both_inf(self): - # GH#32593 - df = DataFrame({"a": [-np.inf, 0, np.inf]}) - expected = DataFrame({"a": [1.0, 2.0, 3.0]}) - result = df.rank() - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "na_option,ascending,expected", - [ - ("top", True, [3.0, 1.0, 2.0]), - ("top", False, [2.0, 1.0, 3.0]), - ("bottom", True, [2.0, 3.0, 1.0]), - ("bottom", False, [1.0, 3.0, 2.0]), - ], - ) - def test_rank_inf_nans_na_option( - self, frame_or_series, method, na_option, ascending, expected - ): - obj = frame_or_series([np.inf, np.nan, -np.inf]) - result = obj.rank(method=method, na_option=na_option, ascending=ascending) - expected = frame_or_series(expected) - tm.assert_equal(result, expected) - - @pytest.mark.parametrize( - "na_option,ascending,expected", - [ - ("bottom", True, [1.0, 2.0, 4.0, 3.0]), - 
("bottom", False, [1.0, 2.0, 4.0, 3.0]), - ("top", True, [2.0, 3.0, 1.0, 4.0]), - ("top", False, [2.0, 3.0, 1.0, 4.0]), - ], - ) - def test_rank_object_first(self, frame_or_series, na_option, ascending, expected): - obj = frame_or_series(["foo", "foo", None, "foo"]) - result = obj.rank(method="first", na_option=na_option, ascending=ascending) - expected = frame_or_series(expected) - tm.assert_equal(result, expected) - - @pytest.mark.parametrize( - "data,expected", - [ - ({"a": [1, 2, "a"], "b": [4, 5, 6]}, DataFrame({"b": [1.0, 2.0, 3.0]})), - ({"a": [1, 2, "a"]}, DataFrame(index=range(3), columns=[])), - ], - ) - def test_rank_mixed_axis_zero(self, data, expected): - df = DataFrame(data) - with pytest.raises(TypeError, match="'<' not supported between instances of"): - df.rank() - result = df.rank(numeric_only=True) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "dtype, exp_dtype", - [("string[pyarrow]", "Int64"), ("string[pyarrow_numpy]", "float64")], - ) - def test_rank_string_dtype(self, dtype, exp_dtype): - # GH#55362 - pytest.importorskip("pyarrow") - obj = Series(["foo", "foo", None, "foo"], dtype=dtype) - result = obj.rank(method="first") - expected = Series([1, 2, None, 3], dtype=exp_dtype) - tm.assert_series_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py deleted file mode 100644 index f1924185a3df1cae2f0df89ec84225cd68f8fa6d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py +++ /dev/null @@ -1,72 +0,0 @@ -""" Test cases for DataFrame.plot """ - -import pytest - -from pandas import DataFrame -from pandas.tests.plotting.common import _check_visible - -pytest.importorskip("matplotlib") - - -class TestDataFramePlotsGroupby: - def _assert_ytickslabels_visibility(self, axes, expected): - for ax, exp in zip(axes, expected): - _check_visible(ax.get_yticklabels(), visible=exp) - - def _assert_xtickslabels_visibility(self, axes, expected): - for ax, exp in zip(axes, expected): - _check_visible(ax.get_xticklabels(), visible=exp) - - @pytest.mark.parametrize( - "kwargs, expected", - [ - # behavior without keyword - ({}, [True, False, True, False]), - # set sharey=True should be identical - ({"sharey": True}, [True, False, True, False]), - # sharey=False, all yticklabels should be visible - ({"sharey": False}, [True, True, True, True]), - ], - ) - def test_groupby_boxplot_sharey(self, kwargs, expected): - # https://github.com/pandas-dev/pandas/issues/20968 - # sharey can now be switched check whether the right - # pair of axes is turned on or off - df = DataFrame( - { - "a": [-1.43, -0.15, -3.70, -1.43, -0.14], - "b": [0.56, 0.84, 0.29, 0.56, 0.85], - "c": [0, 1, 2, 3, 1], - }, - index=[0, 1, 2, 3, 4], - ) - axes = df.groupby("c").boxplot(**kwargs) - self._assert_ytickslabels_visibility(axes, expected) - - @pytest.mark.parametrize( - "kwargs, expected", - [ - # behavior without keyword - ({}, [True, True, True, True]), - # set sharex=False should be identical - ({"sharex": False}, [True, True, True, True]), - # sharex=True, xticklabels should be visible - # only for bottom plots - ({"sharex": True}, [False, False, True, True]), - ], - ) - def test_groupby_boxplot_sharex(self, kwargs, expected): - # 
https://github.com/pandas-dev/pandas/issues/20968 - # sharex can now be switched; check whether the right - # pair of axes is turned on or off - - df = DataFrame( - { - "a": [-1.43, -0.15, -3.70, -1.43, -0.14], - "b": [0.56, 0.84, 0.29, 0.56, 0.85], - "c": [0, 1, 2, 3, 1], - }, - index=[0, 1, 2, 3, 4], - ) - axes = df.groupby("c").boxplot(**kwargs) - self._assert_xticklabels_visibility(axes, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/window/test_api.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/window/test_api.py deleted file mode 100644 index 33858e10afd75733d14fd601aba7e778cbb683ff..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/window/test_api.py +++ /dev/null @@ -1,396 +0,0 @@ -import numpy as np -import pytest - -from pandas.errors import ( - DataError, - SpecificationError, -) - -from pandas import ( - DataFrame, - Index, - MultiIndex, - Period, - Series, - Timestamp, - concat, - date_range, - timedelta_range, -) -import pandas._testing as tm - - -def test_getitem(step): - frame = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) - r = frame.rolling(window=5, step=step) - tm.assert_index_equal(r._selected_obj.columns, frame[::step].columns) - - r = frame.rolling(window=5, step=step)[1] - assert r._selected_obj.name == frame[::step].columns[1] - - # technically this is allowed - r = frame.rolling(window=5, step=step)[1, 3] - tm.assert_index_equal(r._selected_obj.columns, frame[::step].columns[[1, 3]]) - - r = frame.rolling(window=5, step=step)[[1, 3]] - tm.assert_index_equal(r._selected_obj.columns, frame[::step].columns[[1, 3]]) - - -def test_select_bad_cols(): - df = DataFrame([[1, 2]], columns=["A", "B"]) - g = df.rolling(window=5) - with pytest.raises(KeyError, match="Columns not found: 'C'"): - g[["C"]] - with pytest.raises(KeyError, match="^[^A]+$"): - # A should not be referenced as a bad column... - # will have to rethink regex if you change message! 
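- # [editor's note -- illustrative sketch, not part of the original file] - # The pattern "^[^A]+$" matches only if the message contains no "A" at all: - #     re.match(r"^[^A]+$", "Columns not found: 'C'")  # -> match object - #     re.match(r"^[^A]+$", "Columns not found: 'A'")  # -> None - # so the assertion fails if the valid column "A" leaks into the KeyError message.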
- g[["A", "C"]] - - -def test_attribute_access(): - df = DataFrame([[1, 2]], columns=["A", "B"]) - r = df.rolling(window=5) - tm.assert_series_equal(r.A.sum(), r["A"].sum()) - msg = "'Rolling' object has no attribute 'F'" - with pytest.raises(AttributeError, match=msg): - r.F - - -def tests_skip_nuisance(step): - df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"}) - r = df.rolling(window=3, step=step) - result = r[["A", "B"]].sum() - expected = DataFrame( - {"A": [np.nan, np.nan, 3, 6, 9], "B": [np.nan, np.nan, 18, 21, 24]}, - columns=list("AB"), - )[::step] - tm.assert_frame_equal(result, expected) - - -def test_sum_object_str_raises(step): - df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"}) - r = df.rolling(window=3, step=step) - with pytest.raises(DataError, match="Cannot aggregate non-numeric type: object"): - # GH#42738, enforced in 2.0 - r.sum() - - -def test_agg(step): - df = DataFrame({"A": range(5), "B": range(0, 10, 2)}) - - r = df.rolling(window=3, step=step) - a_mean = r["A"].mean() - a_std = r["A"].std() - a_sum = r["A"].sum() - b_mean = r["B"].mean() - b_std = r["B"].std() - - with tm.assert_produces_warning(FutureWarning, match="using Rolling.[mean|std]"): - result = r.aggregate([np.mean, np.std]) - expected = concat([a_mean, a_std, b_mean, b_std], axis=1) - expected.columns = MultiIndex.from_product([["A", "B"], ["mean", "std"]]) - tm.assert_frame_equal(result, expected) - - with tm.assert_produces_warning(FutureWarning, match="using Rolling.[mean|std]"): - result = r.aggregate({"A": np.mean, "B": np.std}) - - expected = concat([a_mean, b_std], axis=1) - tm.assert_frame_equal(result, expected, check_like=True) - - result = r.aggregate({"A": ["mean", "std"]}) - expected = concat([a_mean, a_std], axis=1) - expected.columns = MultiIndex.from_tuples([("A", "mean"), ("A", "std")]) - tm.assert_frame_equal(result, expected) - - result = r["A"].aggregate(["mean", "sum"]) - expected = concat([a_mean, a_sum], axis=1) - expected.columns = ["mean", "sum"] - tm.assert_frame_equal(result, expected) - - msg = "nested renamer is not supported" - with pytest.raises(SpecificationError, match=msg): - # using a dict with renaming - r.aggregate({"A": {"mean": "mean", "sum": "sum"}}) - - with pytest.raises(SpecificationError, match=msg): - r.aggregate( - {"A": {"mean": "mean", "sum": "sum"}, "B": {"mean2": "mean", "sum2": "sum"}} - ) - - result = r.aggregate({"A": ["mean", "std"], "B": ["mean", "std"]}) - expected = concat([a_mean, a_std, b_mean, b_std], axis=1) - - exp_cols = [("A", "mean"), ("A", "std"), ("B", "mean"), ("B", "std")] - expected.columns = MultiIndex.from_tuples(exp_cols) - tm.assert_frame_equal(result, expected, check_like=True) - - -@pytest.mark.parametrize( - "func", [["min"], ["mean", "max"], {"b": "sum"}, {"b": "prod", "c": "median"}] -) -def test_multi_axis_1_raises(func): - # GH#46904 - df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5], "c": [6, 7, 8]}) - msg = "Support for axis=1 in DataFrame.rolling is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - r = df.rolling(window=3, axis=1) - with pytest.raises(NotImplementedError, match="axis other than 0 is not supported"): - r.agg(func) - - -def test_agg_apply(raw): - # passed lambda - df = DataFrame({"A": range(5), "B": range(0, 10, 2)}) - - r = df.rolling(window=3) - a_sum = r["A"].sum() - - with tm.assert_produces_warning(FutureWarning, match="using Rolling.[sum|std]"): - result = r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)}) - rcustom = r["B"].apply(lambda x: 
np.std(x, ddof=1), raw=raw) - expected = concat([a_sum, rcustom], axis=1) - tm.assert_frame_equal(result, expected, check_like=True) - - -def test_agg_consistency(step): - df = DataFrame({"A": range(5), "B": range(0, 10, 2)}) - r = df.rolling(window=3, step=step) - - with tm.assert_produces_warning(FutureWarning, match="using Rolling.[sum|mean]"): - result = r.agg([np.sum, np.mean]).columns - expected = MultiIndex.from_product([list("AB"), ["sum", "mean"]]) - tm.assert_index_equal(result, expected) - - with tm.assert_produces_warning(FutureWarning, match="using Rolling.[sum|mean]"): - result = r["A"].agg([np.sum, np.mean]).columns - expected = Index(["sum", "mean"]) - tm.assert_index_equal(result, expected) - - with tm.assert_produces_warning(FutureWarning, match="using Rolling.[sum|mean]"): - result = r.agg({"A": [np.sum, np.mean]}).columns - expected = MultiIndex.from_tuples([("A", "sum"), ("A", "mean")]) - tm.assert_index_equal(result, expected) - - -def test_agg_nested_dicts(): - # API change for disallowing these types of nested dicts - df = DataFrame({"A": range(5), "B": range(0, 10, 2)}) - r = df.rolling(window=3) - - msg = "nested renamer is not supported" - with pytest.raises(SpecificationError, match=msg): - r.aggregate({"r1": {"A": ["mean", "sum"]}, "r2": {"B": ["mean", "sum"]}}) - - expected = concat( - [r["A"].mean(), r["A"].std(), r["B"].mean(), r["B"].std()], axis=1 - ) - expected.columns = MultiIndex.from_tuples( - [("ra", "mean"), ("ra", "std"), ("rb", "mean"), ("rb", "std")] - ) - with pytest.raises(SpecificationError, match=msg): - r[["A", "B"]].agg({"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}}) - - with pytest.raises(SpecificationError, match=msg): - r.agg({"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}}) - - -def test_count_nonnumeric_types(step): - # GH12541 - cols = [ - "int", - "float", - "string", - "datetime", - "timedelta", - "periods", - "fl_inf", - "fl_nan", - "str_nan", - "dt_nat", - "periods_nat", - ] - dt_nat_col = [Timestamp("20170101"), Timestamp("20170203"), Timestamp(None)] - - df = DataFrame( - { - "int": [1, 2, 3], - "float": [4.0, 5.0, 6.0], - "string": list("abc"), - "datetime": date_range("20170101", periods=3), - "timedelta": timedelta_range("1 s", periods=3, freq="s"), - "periods": [ - Period("2012-01"), - Period("2012-02"), - Period("2012-03"), - ], - "fl_inf": [1.0, 2.0, np.inf], - "fl_nan": [1.0, 2.0, np.nan], - "str_nan": ["aa", "bb", np.nan], - "dt_nat": dt_nat_col, - "periods_nat": [ - Period("2012-01"), - Period("2012-02"), - Period(None), - ], - }, - columns=cols, - ) - - expected = DataFrame( - { - "int": [1.0, 2.0, 2.0], - "float": [1.0, 2.0, 2.0], - "string": [1.0, 2.0, 2.0], - "datetime": [1.0, 2.0, 2.0], - "timedelta": [1.0, 2.0, 2.0], - "periods": [1.0, 2.0, 2.0], - "fl_inf": [1.0, 2.0, 2.0], - "fl_nan": [1.0, 2.0, 1.0], - "str_nan": [1.0, 2.0, 1.0], - "dt_nat": [1.0, 2.0, 1.0], - "periods_nat": [1.0, 2.0, 1.0], - }, - columns=cols, - )[::step] - - result = df.rolling(window=2, min_periods=0, step=step).count() - tm.assert_frame_equal(result, expected) - - result = df.rolling(1, min_periods=0, step=step).count() - expected = df.notna().astype(float)[::step] - tm.assert_frame_equal(result, expected) - - -def test_preserve_metadata(): - # GH 10565 - s = Series(np.arange(100), name="foo") - - s2 = s.rolling(30).sum() - s3 = s.rolling(20).sum() - assert s2.name == "foo" - assert s3.name == "foo" - - -@pytest.mark.parametrize( - "func,window_size,expected_vals", - [ - ( - "rolling", - 2, - [ - [np.nan, np.nan, 
np.nan, np.nan], - [15.0, 20.0, 25.0, 20.0], - [25.0, 30.0, 35.0, 30.0], - [np.nan, np.nan, np.nan, np.nan], - [20.0, 30.0, 35.0, 30.0], - [35.0, 40.0, 60.0, 40.0], - [60.0, 80.0, 85.0, 80], - ], - ), - ( - "expanding", - None, - [ - [10.0, 10.0, 20.0, 20.0], - [15.0, 20.0, 25.0, 20.0], - [20.0, 30.0, 30.0, 20.0], - [10.0, 10.0, 30.0, 30.0], - [20.0, 30.0, 35.0, 30.0], - [26.666667, 40.0, 50.0, 30.0], - [40.0, 80.0, 60.0, 30.0], - ], - ), - ], -) -def test_multiple_agg_funcs(func, window_size, expected_vals): - # GH 15072 - df = DataFrame( - [ - ["A", 10, 20], - ["A", 20, 30], - ["A", 30, 40], - ["B", 10, 30], - ["B", 30, 40], - ["B", 40, 80], - ["B", 80, 90], - ], - columns=["stock", "low", "high"], - ) - - f = getattr(df.groupby("stock"), func) - if window_size: - window = f(window_size) - else: - window = f() - - index = MultiIndex.from_tuples( - [("A", 0), ("A", 1), ("A", 2), ("B", 3), ("B", 4), ("B", 5), ("B", 6)], - names=["stock", None], - ) - columns = MultiIndex.from_tuples( - [("low", "mean"), ("low", "max"), ("high", "mean"), ("high", "min")] - ) - expected = DataFrame(expected_vals, index=index, columns=columns) - - result = window.agg({"low": ["mean", "max"], "high": ["mean", "min"]}) - - tm.assert_frame_equal(result, expected) - - -def test_dont_modify_attributes_after_methods( - arithmetic_win_operators, closed, center, min_periods, step -): - # GH 39554 - roll_obj = Series(range(1)).rolling( - 1, center=center, closed=closed, min_periods=min_periods, step=step - ) - expected = {attr: getattr(roll_obj, attr) for attr in roll_obj._attributes} - getattr(roll_obj, arithmetic_win_operators)() - result = {attr: getattr(roll_obj, attr) for attr in roll_obj._attributes} - assert result == expected - - -def test_centered_axis_validation(step): - # ok - msg = "The 'axis' keyword in Series.rolling is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - Series(np.ones(10)).rolling(window=3, center=True, axis=0, step=step).mean() - - # bad axis - msg = "No axis named 1 for object type Series" - with pytest.raises(ValueError, match=msg): - Series(np.ones(10)).rolling(window=3, center=True, axis=1, step=step).mean() - - # ok ok - df = DataFrame(np.ones((10, 10))) - msg = "The 'axis' keyword in DataFrame.rolling is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - df.rolling(window=3, center=True, axis=0, step=step).mean() - msg = "Support for axis=1 in DataFrame.rolling is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - df.rolling(window=3, center=True, axis=1, step=step).mean() - - # bad axis - msg = "No axis named 2 for object type DataFrame" - with pytest.raises(ValueError, match=msg): - (df.rolling(window=3, center=True, axis=2, step=step).mean()) - - -def test_rolling_min_min_periods(step): - a = Series([1, 2, 3, 4, 5]) - result = a.rolling(window=100, min_periods=1, step=step).min() - expected = Series(np.ones(len(a)))[::step] - tm.assert_series_equal(result, expected) - msg = "min_periods 5 must be <= window 3" - with pytest.raises(ValueError, match=msg): - Series([1, 2, 3]).rolling(window=3, min_periods=5, step=step).min() - - -def test_rolling_max_min_periods(step): - a = Series([1, 2, 3, 4, 5], dtype=np.float64) - result = a.rolling(window=100, min_periods=1, step=step).max() - expected = a[::step] - tm.assert_almost_equal(result, expected) - msg = "min_periods 5 must be <= window 3" - with pytest.raises(ValueError, match=msg): - Series([1, 2, 3]).rolling(window=3, min_periods=5, step=step).max() diff 
--git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/py37compat.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/py37compat.py deleted file mode 100644 index 754715a5084a9e4f04544ac8a4426d0871a0eb88..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/py37compat.py +++ /dev/null @@ -1,30 +0,0 @@ -import sys - - -def _pythonlib_compat(): - """ - On Python 3.7 and earlier, distutils would include the Python - library. See pypa/distutils#9. - """ - from distutils import sysconfig - if not sysconfig.get_config_var('Py_ENABLED_SHARED'): - return - - yield 'python{}.{}{}'.format( - sys.hexversion >> 24, - (sys.hexversion >> 16) & 0xff, - sysconfig.get_config_var('ABIFLAGS'), - ) - - -def compose(f1, f2): - return lambda *args, **kwargs: f1(f2(*args, **kwargs)) - - -pythonlib = ( - compose(list, _pythonlib_compat) - if sys.version_info < (3, 8) - and sys.platform != 'darwin' - and sys.platform[:3] != 'aix' - else list -) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/sandbox.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/sandbox.py deleted file mode 100644 index 034fc80d20ea4a59d77af6f808dbcfc3b87612c3..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/sandbox.py +++ /dev/null @@ -1,530 +0,0 @@ -import os -import sys -import tempfile -import operator -import functools -import itertools -import re -import contextlib -import pickle -import textwrap -import builtins - -import pkg_resources -from distutils.errors import DistutilsError -from pkg_resources import working_set - -if sys.platform.startswith('java'): - import org.python.modules.posix.PosixModule as _os -else: - _os = sys.modules[os.name] -try: - _file = file -except NameError: - _file = None -_open = open - - -__all__ = [ - "AbstractSandbox", - "DirectorySandbox", - "SandboxViolation", - "run_setup", -] - - -def _execfile(filename, globals, locals=None): - """ - Python 3 implementation of execfile. - """ - mode = 'rb' - with open(filename, mode) as stream: - script = stream.read() - if locals is None: - locals = globals - code = compile(script, filename, 'exec') - exec(code, globals, locals) - - -@contextlib.contextmanager -def save_argv(repl=None): - saved = sys.argv[:] - if repl is not None: - sys.argv[:] = repl - try: - yield saved - finally: - sys.argv[:] = saved - - -@contextlib.contextmanager -def save_path(): - saved = sys.path[:] - try: - yield saved - finally: - sys.path[:] = saved - - -@contextlib.contextmanager -def override_temp(replacement): - """ - Monkey-patch tempfile.tempdir with replacement, ensuring it exists - """ - os.makedirs(replacement, exist_ok=True) - - saved = tempfile.tempdir - - tempfile.tempdir = replacement - - try: - yield - finally: - tempfile.tempdir = saved - - -@contextlib.contextmanager -def pushd(target): - saved = os.getcwd() - os.chdir(target) - try: - yield saved - finally: - os.chdir(saved) - - -class UnpickleableException(Exception): - """ - An exception representing another Exception that could not be pickled. - """ - - @staticmethod - def dump(type, exc): - """ - Always return a dumped (pickled) type and exc. If exc can't be pickled, - wrap it in UnpickleableException first. 
- """ - try: - return pickle.dumps(type), pickle.dumps(exc) - except Exception: - # get UnpickleableException inside the sandbox - from setuptools.sandbox import UnpickleableException as cls - - return cls.dump(cls, cls(repr(exc))) - - -class ExceptionSaver: - """ - A Context Manager that will save an exception, serialized, and restore it - later. - """ - - def __enter__(self): - return self - - def __exit__(self, type, exc, tb): - if not exc: - return - - # dump the exception - self._saved = UnpickleableException.dump(type, exc) - self._tb = tb - - # suppress the exception - return True - - def resume(self): - "restore and re-raise any exception" - - if '_saved' not in vars(self): - return - - type, exc = map(pickle.loads, self._saved) - raise exc.with_traceback(self._tb) - - -@contextlib.contextmanager -def save_modules(): - """ - Context in which imported modules are saved. - - Translates exceptions internal to the context into the equivalent exception - outside the context. - """ - saved = sys.modules.copy() - with ExceptionSaver() as saved_exc: - yield saved - - sys.modules.update(saved) - # remove any modules imported since - del_modules = ( - mod_name - for mod_name in sys.modules - if mod_name not in saved - # exclude any encodings modules. See #285 - and not mod_name.startswith('encodings.') - ) - _clear_modules(del_modules) - - saved_exc.resume() - - -def _clear_modules(module_names): - for mod_name in list(module_names): - del sys.modules[mod_name] - - -@contextlib.contextmanager -def save_pkg_resources_state(): - saved = pkg_resources.__getstate__() - try: - yield saved - finally: - pkg_resources.__setstate__(saved) - - -@contextlib.contextmanager -def setup_context(setup_dir): - temp_dir = os.path.join(setup_dir, 'temp') - with save_pkg_resources_state(): - with save_modules(): - with save_path(): - hide_setuptools() - with save_argv(): - with override_temp(temp_dir): - with pushd(setup_dir): - # ensure setuptools commands are available - __import__('setuptools') - yield - - -_MODULES_TO_HIDE = { - 'setuptools', - 'distutils', - 'pkg_resources', - 'Cython', - '_distutils_hack', -} - - -def _needs_hiding(mod_name): - """ - >>> _needs_hiding('setuptools') - True - >>> _needs_hiding('pkg_resources') - True - >>> _needs_hiding('setuptools_plugin') - False - >>> _needs_hiding('setuptools.__init__') - True - >>> _needs_hiding('distutils') - True - >>> _needs_hiding('os') - False - >>> _needs_hiding('Cython') - True - """ - base_module = mod_name.split('.', 1)[0] - return base_module in _MODULES_TO_HIDE - - -def hide_setuptools(): - """ - Remove references to setuptools' modules from sys.modules to allow the - invocation to import the most appropriate setuptools. This technique is - necessary to avoid issues such as #315 where setuptools upgrading itself - would fail to find a function declared in the metadata. 
- """ - _distutils_hack = sys.modules.get('_distutils_hack', None) - if _distutils_hack is not None: - _distutils_hack.remove_shim() - - modules = filter(_needs_hiding, sys.modules) - _clear_modules(modules) - - -def run_setup(setup_script, args): - """Run a distutils setup script, sandboxed in its directory""" - setup_dir = os.path.abspath(os.path.dirname(setup_script)) - with setup_context(setup_dir): - try: - sys.argv[:] = [setup_script] + list(args) - sys.path.insert(0, setup_dir) - # reset to include setup dir, w/clean callback list - working_set.__init__() - working_set.callbacks.append(lambda dist: dist.activate()) - - with DirectorySandbox(setup_dir): - ns = dict(__file__=setup_script, __name__='__main__') - _execfile(setup_script, ns) - except SystemExit as v: - if v.args and v.args[0]: - raise - # Normal exit, just return - - -class AbstractSandbox: - """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts""" - - _active = False - - def __init__(self): - self._attrs = [ - name - for name in dir(_os) - if not name.startswith('_') and hasattr(self, name) - ] - - def _copy(self, source): - for name in self._attrs: - setattr(os, name, getattr(source, name)) - - def __enter__(self): - self._copy(self) - if _file: - builtins.file = self._file - builtins.open = self._open - self._active = True - - def __exit__(self, exc_type, exc_value, traceback): - self._active = False - if _file: - builtins.file = _file - builtins.open = _open - self._copy(_os) - - def run(self, func): - """Run 'func' under os sandboxing""" - with self: - return func() - - def _mk_dual_path_wrapper(name): - original = getattr(_os, name) - - def wrap(self, src, dst, *args, **kw): - if self._active: - src, dst = self._remap_pair(name, src, dst, *args, **kw) - return original(src, dst, *args, **kw) - - return wrap - - for name in ["rename", "link", "symlink"]: - if hasattr(_os, name): - locals()[name] = _mk_dual_path_wrapper(name) - - def _mk_single_path_wrapper(name, original=None): - original = original or getattr(_os, name) - - def wrap(self, path, *args, **kw): - if self._active: - path = self._remap_input(name, path, *args, **kw) - return original(path, *args, **kw) - - return wrap - - if _file: - _file = _mk_single_path_wrapper('file', _file) - _open = _mk_single_path_wrapper('open', _open) - for name in [ - "stat", - "listdir", - "chdir", - "open", - "chmod", - "chown", - "mkdir", - "remove", - "unlink", - "rmdir", - "utime", - "lchown", - "chroot", - "lstat", - "startfile", - "mkfifo", - "mknod", - "pathconf", - "access", - ]: - if hasattr(_os, name): - locals()[name] = _mk_single_path_wrapper(name) - - def _mk_single_with_return(name): - original = getattr(_os, name) - - def wrap(self, path, *args, **kw): - if self._active: - path = self._remap_input(name, path, *args, **kw) - return self._remap_output(name, original(path, *args, **kw)) - return original(path, *args, **kw) - - return wrap - - for name in ['readlink', 'tempnam']: - if hasattr(_os, name): - locals()[name] = _mk_single_with_return(name) - - def _mk_query(name): - original = getattr(_os, name) - - def wrap(self, *args, **kw): - retval = original(*args, **kw) - if self._active: - return self._remap_output(name, retval) - return retval - - return wrap - - for name in ['getcwd', 'tmpnam']: - if hasattr(_os, name): - locals()[name] = _mk_query(name) - - def _validate_path(self, path): - """Called to remap or validate any path, whether input or output""" - return path - - def _remap_input(self, operation, path, *args, **kw): - """Called 
for path inputs""" - return self._validate_path(path) - - def _remap_output(self, operation, path): - """Called for path outputs""" - return self._validate_path(path) - - def _remap_pair(self, operation, src, dst, *args, **kw): - """Called for path pairs like rename, link, and symlink operations""" - return ( - self._remap_input(operation + '-from', src, *args, **kw), - self._remap_input(operation + '-to', dst, *args, **kw), - ) - - -if hasattr(os, 'devnull'): - _EXCEPTIONS = [os.devnull] -else: - _EXCEPTIONS = [] - - -class DirectorySandbox(AbstractSandbox): - """Restrict operations to a single subdirectory - pseudo-chroot""" - - write_ops = dict.fromkeys( - [ - "open", - "chmod", - "chown", - "mkdir", - "remove", - "unlink", - "rmdir", - "utime", - "lchown", - "chroot", - "mkfifo", - "mknod", - "tempnam", - ] - ) - - _exception_patterns = [] - "exempt writing to paths that match the pattern" - - def __init__(self, sandbox, exceptions=_EXCEPTIONS): - self._sandbox = os.path.normcase(os.path.realpath(sandbox)) - self._prefix = os.path.join(self._sandbox, '') - self._exceptions = [ - os.path.normcase(os.path.realpath(path)) for path in exceptions - ] - AbstractSandbox.__init__(self) - - def _violation(self, operation, *args, **kw): - from setuptools.sandbox import SandboxViolation - - raise SandboxViolation(operation, args, kw) - - if _file: - - def _file(self, path, mode='r', *args, **kw): - if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): - self._violation("file", path, mode, *args, **kw) - return _file(path, mode, *args, **kw) - - def _open(self, path, mode='r', *args, **kw): - if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): - self._violation("open", path, mode, *args, **kw) - return _open(path, mode, *args, **kw) - - def tmpnam(self): - self._violation("tmpnam") - - def _ok(self, path): - active = self._active - try: - self._active = False - realpath = os.path.normcase(os.path.realpath(path)) - return ( - self._exempted(realpath) - or realpath == self._sandbox - or realpath.startswith(self._prefix) - ) - finally: - self._active = active - - def _exempted(self, filepath): - start_matches = ( - filepath.startswith(exception) for exception in self._exceptions - ) - pattern_matches = ( - re.match(pattern, filepath) for pattern in self._exception_patterns - ) - candidates = itertools.chain(start_matches, pattern_matches) - return any(candidates) - - def _remap_input(self, operation, path, *args, **kw): - """Called for path inputs""" - if operation in self.write_ops and not self._ok(path): - self._violation(operation, os.path.realpath(path), *args, **kw) - return path - - def _remap_pair(self, operation, src, dst, *args, **kw): - """Called for path pairs like rename, link, and symlink operations""" - if not self._ok(src) or not self._ok(dst): - self._violation(operation, src, dst, *args, **kw) - return (src, dst) - - def open(self, file, flags, mode=0o777, *args, **kw): - """Called for low-level os.open()""" - if flags & WRITE_FLAGS and not self._ok(file): - self._violation("os.open", file, flags, mode, *args, **kw) - return _os.open(file, flags, mode, *args, **kw) - - -WRITE_FLAGS = functools.reduce( - operator.or_, - [ - getattr(_os, a, 0) - for a in "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split() - ], -) - - -class SandboxViolation(DistutilsError): - """A setup script attempted to modify the filesystem outside the sandbox""" - - tmpl = textwrap.dedent( - """ - SandboxViolation: {cmd}{args!r} {kwargs} - - The package setup script has 
attempted to modify files on your system - that are not within the EasyInstall build area, and has been aborted. - - This package cannot be safely installed by EasyInstall, and may not - support alternate installation locations even if you run its setup - script by hand. Please inform the package's author and the EasyInstall - maintainers to find out if a fix or workaround is available. - """ - ).lstrip() - - def __str__(self): - cmd, args, kwargs = self.args - return self.tmpl.format(**locals()) diff --git a/spaces/pseudolab/PatentClaimsExtraction/README.md b/spaces/pseudolab/PatentClaimsExtraction/README.md deleted file mode 100644 index de7a29634ee726773b91a0c1279488d005c11392..0000000000000000000000000000000000000000 --- a/spaces/pseudolab/PatentClaimsExtraction/README.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: RecordAndPatentYourClaims -emoji: 🔎 -colorFrom: red -colorTo: blue -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: false -license: mit ---- - -The Problem and Market Opportunity - -The intellectual property industry, valued at $367 billion annually, faces a significant issue - the laborious and expensive process of crafting patent claims. With more than 700,000 patent applications filed in the US in 2022, there's a growing need for a game-changing solution. - -The Solution: PatentableClaimExtraction - -PCE offers a novel solution by listening to inventors' conversations, extracting patentable claims from these discussions, and formatting them into the required patent claim format. The result is a dramatic reduction in the time it takes to bring an idea to market, from weeks to mere minutes. This revolutionary approach caters to individual inventors and small to medium-sized enterprises (SMEs), democratizing the patenting process. - -Time-Efficiency: Reducing patent application time from weeks to minutes. -Cost Reduction: Substantial savings on legal fees. -Accessibility: Making patent protection accessible to smaller innovators. -Accuracy: AI-driven extraction ensures high-quality patent claims. -Market Size and Competitive Landscape - -Our primary target is the SME and individual innovator market, which accounts for 60% of patent applicants and a $220 billion market share. With limited competition in the AI-driven patent claim extraction sector, PCE holds a unique position. Our proprietary algorithms offer a significant edge in this market. - -PCE's business model includes subscription-based pricing tiers, a freemium model for individual inventors, and the licensing of our API to law firms and IP consultants. To drive adoption, we will partner with innovation hubs, accelerators, universities, law firms, and IP consultants. Continuous algorithmic improvement will further secure our market position. - -Behind this groundbreaking venture is a dedicated team with extensive backgrounds in AI, IP law, and tech entrepreneurship. Our experts in AI development, legal expertise, and business acumen collectively drive PCE's success. 
\ No newline at end of file diff --git a/spaces/pustozerov/poc-handwriting-ocr/pages/handwriting_matcher.py b/spaces/pustozerov/poc-handwriting-ocr/pages/handwriting_matcher.py deleted file mode 100644 index a94ef2b0051424258b1e8d3ec8b687969a1ab765..0000000000000000000000000000000000000000 --- a/spaces/pustozerov/poc-handwriting-ocr/pages/handwriting_matcher.py +++ /dev/null @@ -1,39 +0,0 @@ -import glob -import os -import random -import shutil - -import pandas as pd -import streamlit as st - -from modules.data_generator.generate_sample import generate_phrase_image - -st.title('Cyrillic handwriting OCR demo') -st.write('This simple demo shows the difference between real and automatically generated handwritten Cyrillic texts. ' - 'The algorithm randomly picks three images from various documents (on the left) and generates artificially ' - 'created images with the same texts.') - -if st.button('Try random samples from the database'): - folder = "data/sample/" - os.makedirs(folder, exist_ok=True) - list_all_images = glob.glob("data/cyrillic_handwriting_dataset/data_decimated/*.png") - chosen_files = sorted(random.sample(list_all_images, 3)) - for f in glob.glob(folder + '*'): - os.remove(f) - for f in chosen_files: - path = shutil.copy2(f, folder) - - df_labels = pd.read_csv("data/cyrillic_handwriting_dataset/labels_decimated.csv", sep='|') - labels_to_take = [] - col1, col2 = st.columns(2) - with col1: - st.text("Real sample from the document") - with col2: - st.text("Automatically generated text") - for f in glob.glob(folder + '*'): - with col1: - st.image(f) - with col2: - text_input = df_labels.loc[df_labels["file_name"] == os.path.basename(f), 'label'].values[0] - image_phrase = generate_phrase_image(text_input, False) - st.image(image_phrase) diff --git a/spaces/pyodide-demo/self-hosted/test.js b/spaces/pyodide-demo/self-hosted/test.js deleted file mode 100644 index 8643c83358521b3f840f6cd9f64ee03055400f22..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/test.js +++ /dev/null @@ -1 +0,0 @@ -var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="build/test.data";var REMOTE_PACKAGE_BASE="test.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new 
XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... ("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","test",true,true);Module["FS_createPath"]("/lib/python3.9/test","audiodata",true,true);Module["FS_createPath"]("/lib/python3.9/test","capath",true,true);Module["FS_createPath"]("/lib/python3.9/test","data",true,true);Module["FS_createPath"]("/lib/python3.9/test","cjkencodings",true,true);Module["FS_createPath"]("/lib/python3.9/test","decimaltestdata",true,true);Module["FS_createPath"]("/lib/python3.9/test","xmltestdata",true,true);Module["FS_createPath"]("/lib/python3.9/test/xmltestdata","c14n-20",true,true);Module["FS_createPath"]("/lib/python3.9/test","dtracedata",true,true);Module["FS_createPath"]("/lib/python3.9/test","eintrdata",true,true);Module["FS_createPath"]("/lib/python3.9/test","imghdrdata",true,true);Module["FS_createPath"]("/lib/python3.9/test","libregrtest",true,true);Module["FS_createPath"]("/lib/python3.9/test","subprocessdata",true,true);Module["FS_createPath"]("/lib/python3.9/test","sndhdrdata",true,true);Module["FS_createPath"]("/lib/python3.9/test","support",true,true);Module["FS_createPath"]("/lib/python3.9/test","tracedmodules",true,true);Module["FS_createPath"]("/lib/python3.9/test","encoded_modules",true,true);Module["FS_createPath"]("/lib/python3.9/test","test_import",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_import","data",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_import/data","circular_imports",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_import/data/circular_imports","subpkg",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_import/data","package",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_import/data","package2",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_import/data","unwritable",true,true);Module["FS_createPath"]("/lib/pytho
n3.9/test","test_importlib",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","builtin",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","data",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","data01",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/data01","subdirectory",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","data02",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/data02","one",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/data02","two",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","data03",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/data03","namespace",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/data03/namespace","portion1",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/data03/namespace","portion2",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","extension",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","frozen",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","import_",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","namespace_pkgs",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs","both_portions",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/both_portions","foo",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs","module_and_namespace_package",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/module_and_namespace_package","a_test",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs","not_a_namespace_pkg",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/not_a_namespace_pkg","foo",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs","portion1",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/portion1","foo",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs","portion2",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/portion2","foo",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs","project1",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/project1","parent",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/project1/parent","child",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs","project2",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/project2","parent",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/project2/parent","child",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs","project3",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/project3","parent",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib/namespace_pkgs/project3/parent","child",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","partial",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","source",true,true);Module["FS_createPath"]("/lib/python3.9/t
est/test_importlib","zipdata01",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_importlib","zipdata02",true,true);Module["FS_createPath"]("/lib/python3.9/test","test_zoneinfo",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_zoneinfo","data",true,true);Module["FS_createPath"]("/lib/python3.9/test","ziptestdata",true,true);Module["FS_createPath"]("/lib/python3.9/test","test_asyncio",true,true);Module["FS_createPath"]("/lib/python3.9/test","test_email",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_email","data",true,true);Module["FS_createPath"]("/lib/python3.9/test","test_json",true,true);Module["FS_createPath"]("/lib/python3.9/test","test_peg_generator",true,true);Module["FS_createPath"]("/lib/python3.9/test","test_tools",true,true);Module["FS_createPath"]("/lib/python3.9/test","test_warnings",true,true);Module["FS_createPath"]("/lib/python3.9/test/test_warnings","data",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var compressedData={data:null,cachedOffset:9181054,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,72,102,332,547,764,994,1209,1426,1656,1871,2088,2318,2533,2750,2980,3195,3412,3642,3857,4074,4304,4519,4736,4966,5181,5398,5628,5843,6060,6290,7503,8937,10105,11099,12047,12937,14005,15009,16139,17243,18164,19305,20320,21364,22174,23207,24364,25353,26326,26973,27929,28717,29819,30873,31985,33209,34308,35264,36133,37060,38104,39223,40140,40986,42128,42956,43865,44533,45296,46378,47621,48753,49956,51277,52440,53504,54648,55435,56460,57610,58749,59625,60807,61872,62931,64145,65163,66336,67404,68479,69450,70623,71822,72823,73760,74909,75965,77167,78304,79430,80567,81695,82857,83923,84781,85893,87019,88162,89098,90049,91232,92289,93432,94414,95527,96843,97713,98649,99688,100412,101495,102575,103749,105797,107306,108650,110404,112291,113651,115699,117747,119795,121849,123896,125943,127818,129866,131472,133073,135121,137169,139226,141024,141981,142820,143355,143953,144809,145927,147023,148328,149379,150401,151918,153507,154640,155872,157087,158444,160091,161543,162973,164394,165773,167343,168775,170280,171491,173039,173933,174870,175524,176405,177225,178298,179365,179881,180402,181157,182110,183147,184224,185274,186239,187175,188184,189258,190057,190963,191861,192751,193700,194760,195756,196662,197588,198599,199512,200537,201569,202564,203451,204512,205521,206536,207548,208552,209605,210586,211609,212539,213503,214423,215388,216395,217031,218325,219500,220266,221703,223369,224496,225400,226130,227204,228906,230249,231094,232005,232774,234403,235990,236923,237685,238432,240112,241631,242638,243552,244308,245714,247189,248353,249222,249957,250839,252557,253797,254845,255672,256493,257858,259554,261114,262105,262816,263983,265718,267198,268224,269107,270053,271826,273175,274612,276219,277340,278679,280182,281103,282709,284029,284927,286591,287642,288767,290343,291225,292789,294082,295e3,296719,297711,298762,299729,300874,301893,302915,303539,304843,305778,306911,308003,308919,309705,310470,311507,312461,313370,314244,315073,315862,316595,317316,318222,319012,319819,320586,321653,322679,323425,324233,325227,326364,327560,328695,329675,330638,331457,332586,333682,334858,335942,336871,337851,338658,339578,340615,341644,342174,342847,343563,344596,345507,346429,347322,348156,349181,350221,351121,352033,352976,354103,355094,356041,356955,357689,358831,359632,360201,3607
51,361891,362701,363595,364707,365714,366526,367536,368359,369212,370181,371081,372002,372906,373814,374932,375816,376816,377805,378630,379601,380671,381859,382958,383869,384898,385913,386990,388031,389011,390288,391691,392821,393965,395253,396359,397344,398291,399164,400163,401406,402295,402921,403800,404897,405644,406505,407657,408860,410049,411031,412041,413064,414241,415305,415995,416850,417645,418425,419081,420359,421582,422875,424301,426098,427924,429724,431625,433371,435138,436888,438700,439948,441129,442140,443279,444559,446607,448095,449402,451183,452963,454148,455359,456595,457599,459646,461577,463631,465557,467610,469540,471593,473187,474518,476288,478271,480310,481703,483150,484939,486639,488099,489897,490919,491762,492545,493047,493854,494816,495696,496537,497742,498847,499724,500791,501751,502542,503476,504612,505477,506560,507323,508190,509137,510028,510902,512062,513409,514666,515736,516733,517888,518978,519737,520487,521298,522082,522817,523701,524566,525422,526281,527332,528785,530004,531198,532450,533720,534980,536249,537506,538960,540161,541517,543148,544183,545204,546234,547343,548393,549268,550298,551285,552251,553132,553868,554530,554938,555361,556292,557305,558240,559271,560451,561741,563106,564291,565288,566623,567622,568814,570115,571070,571875,572731,573690,574851,575842,577474,579527,581287,582758,584689,586031,587589,589382,590723,591827,592648,593581,594725,595863,596925,598043,599139,600258,601441,602717,603725,604683,605407,606215,606791,607689,608469,609200,609967,610560,611504,612627,613553,614306,615040,615725,616396,617102,617857,618986,619809,620557,621247,622179,623133,623702,624411,625200,626200,627410,628539,629544,630642,631767,632834,633765,634656,635692,636739,637717,638578,639434,640319,641200,642075,642984,644061,645243,646267,647394,648088,649110,650468,651613,652439,653610,654807,655860,656777,657709,658582,659590,660557,661447,662351,663219,664053,664988,665944,666970,667979,668905,669793,670762,671768,672711,673581,674404,675224,676169,677143,678123,679110,680009,680940,681818,682768,683738,684663,685642,687025,688514,690429,692477,693621,694065,694672,695715,696794,697898,699133,700307,701390,702476,703495,704477,705672,706848,708663,710486,712304,714147,715994,717848,719685,721519,723253,724986,726717,728219,729410,730358,731043,731841,732598,733441,734064,734730,735392,736223,737279,738466,739680,741424,742789,744753,745642,746555,747230,748219,748936,749689,750397,751507,752300,753327,753993,755332,756501,757789,759132,761062,763110,765148,766658,767988,769140,770335,771748,772709,773515,774402,775307,776130,776760,777570,778268,778992,779680,780353,780958,781511,782167,783035,783593,784277,784923,785617,786257,786771,787530,788423,789092,789635,790201,790807,791800,792665,793665,794344,794885,796348,797673,798876,800021,801314,802675,803857,804718,805648,806478,807501,808286,809233,809793,810606,811242,812259,813451,814489,815453,816421,817370,818161,818988,819734,820575,821303,822279,823212,823966,824639,825484,826355,827044,827779,828406,829102,829871,830660,831387,832254,832981,833656,834050,834615,835533,836162,836783,837732,838679,839751,840241,840901,841841,842523,843468,844334,845028,845706,846445,847270,847847,848500,849270,850230,851163,852522,853941,855327,856381,857223,857900,858572,859208,860100,861398,862448,863425,864389,865297,866092,866802,867597,868489,869256,870091,870971,871809,872619,873195,873913,874506,875095,875800,876606,877527,878395,879421,880495,881606,882619,883783,884741,885500,886635,887569,888284,889070,889
74207,7974788,7975359,7975917,7976476,7977031,7977588,7978151,7978694,7979247,7979799,7980330,7980875,7981404,7981944,7982493,7983020,7983563,7984084,7984618,7985148,7985677,7986199,7986724,7987248,7987777,7988301,7988819,7989354,7989878,7990395,7990915,7991428,7991963,7992477,7992992,7993513,7994024,7994547,7995054,7995570,7996085,7996598,7997340,7998289,7999093,8000229,8001104,8002031,8003145,8004355,8005088,8005657,8006372,8007192,8007637,8008165,8008728,8009424,8010300,8010945,8011571,8012103,8012791,8013608,8014098,8015047,8016453,8017395,8018306,8018933,8019824,8020748,8021446,8022356,8023154,8023977,8024542,8025096,8025654,8026130,8026650,8027335,8028026,8028745,8029347,8030283,8030935,8031687,8032770,8033956,8034882,8035777,8036607,8037450,8038476,8039161,8039952,8040755,8041291,8042331,8043313,8044103,8044888,8045488,8046120,8046659,8047226,8048648,8049623,8050987,8051923,8052704,8053950,8054990,8055971,8056855,8057626,8058506,8059351,8060781,8061947,8062983,8064050,8065174,8066326,8067437,8068227,8069447,8070801,8072189,8074056,8075415,8076831,8078483,8080063,8081643,8083158,8084255,8085090,8086043,8087008,8088423,8089559,8090938,8092158,8093387,8094568,8095623,8096706,8097794,8098943,8100074,8101354,8102537,8103790,8104962,8106456,8107655,8108987,8109876,8111292,8112584,8113953,8115205,8116624,8118022,8119280,8120479,8121426,8122580,8123789,8124841,8126045,8127499,8128413,8129390,8130289,8131325,8132579,8133956,8135143,8136522,8137872,8139199,8140358,8141742,8143005,8144510,8145837,8147092,8148388,8149726,8151114,8152030,8153269,8154487,8155701,8156681,8157925,8159366,8160703,8162141,8163240,8164701,8165864,8167187,8168491,8169730,8170704,8171955,8173156,8174457,8175596,8176976,8177975,8179195,8180498,8181597,8182773,8184188,8185509,8186801,8188023,8188942,8190264,8191444,8192608,8193920,8195180,8196347,8197377,8198562,8199861,8201217,8202295,8203369,8204821,8205831,8206992,8208342,8209535,8210929,8212193,8213405,8214644,8215966,8217190,8218757,8220375,8221486,8222786,8223910,8225177,8226262,8227235,8228530,8229956,8231058,8231981,8233158,8234361,8235481,8236680,8237811,8239042,8240213,8241066,8242234,8243029,8244290,8245334,8246523,8247556,8248384,8248952,8250215,8251235,8252341,8253095,8253782,8254917,8255708,8256890,8257835,8258926,8259937,8261272,8262301,8263033,8263970,8264790,8265762,8266683,8267666,8268581,8269540,8270513,8271645,8272648,8273693,8274735,8275790,8276843,8277889,8278595,8279728,8280544,8281524,8282410,8283240,8284173,8285316,8286267,8287539,8288627,8289688,8290829,8291810,8292986,8293864,8294904,8295915,8297019,8298024,8299333,8300516,8301134,8301927,8302899,8303906,8304797,8306055,8307246,8308182,8309186,8310181,8310937,8311889,8313018,8313471,8313944,8314837,8315571,8316271,8317206,8317825,8318682,8319104,8319601,8320398,8320955,8321393,8321909,8323312,8324641,8325758,8327060,8328158,8329122,8329803,8330847,8331845,8332802,8333729,8334686,8335454,8335969,8337160,8337976,8338872,8339900,8340743,8341786,8342610,8343962,8345236,8346245,8347198,8348344,8349691,8350862,8352151,8353401,8354472,8355480,8356746,8357827,8358987,8359918,8360969,8361952,8363025,8364365,8365626,8366709,8367726,8368604,8369697,8370690,8371555,8372645,8373785,8374813,8375659,8376519,8377241,8378365,8379289,8380551,8381449,8382457,8383576,8384785,8385953,8386886,8387739,8388659,8389263,8390426,8391393,8392275,8393043,8394164,8395112,8396138,8397007,8398014,8399110,8400340,8401005,8401775,8402655,8403600,8404691,8405506,8406594,8407519,8408379,8409322,8410417,8411389,8412154,8413040,84140
94,8414509,8415076,8416050,8416625,8417540,8418329,8418899,8420165,8421213,8422194,8423466,8424347,8425743,8426781,8427786,8428966,8430180,8431511,8432644,8433615,8434615,8435541,8436227,8437294,8438450,8439617,8440643,8441686,8442753,8443867,8444860,8446033,8446966,8448041,8448994,8449803,8450562,8451271,8452096,8453140,8454102,8455159,8456263,8457065,8458244,8459348,8460517,8461515,8462651,8463421,8464240,8464942,8465819,8467306,8468850,8470771,8472705,8474499,8476429,8478351,8480212,8481544,8482658,8483820,8484763,8485770,8486825,8487955,8488789,8489355,8490221,8491208,8492242,8493173,8494157,8495235,8495773,8496662,8497629,8498521,8499444,8500376,8501366,8502552,8503442,8504449,8505523,8506609,8507241,8508314,8509335,8510245,8511373,8511964,8512801,8513555,8514283,8515054,8515861,8516621,8517508,8518631,8519590,8520694,8521645,8522501,8523432,8524547,8525754,8526678,8527390,8528421,8529344,8530377,8531247,8532211,8533087,8534050,8535033,8536190,8537405,8538352,8539369,8540482,8541310,8542231,8543292,8544086,8545025,8545980,8547004,8548015,8549082,8549982,8550958,8552048,8552807,8553906,8554816,8555591,8556377,8557064,8557932,8558743,8559706,8560472,8561298,8562269,8563176,8564063,8564922,8565618,8566174,8566707,8567500,8568300,8569061,8569915,8570840,8571799,8572642,8573262,8573920,8574836,8575596,8576296,8577221,8578254,8579182,8579699,8580463,8581290,8582580,8583495,8584208,8585236,8586154,8587018,8587800,8588588,8589218,8590169,8590871,8591497,8592375,8593131,8593904,8594667,8595617,8596662,8597709,8598731,8599433,8600227,8600950,8601748,8602589,8603378,8604010,8604769,8605399,8606166,8606998,8607908,8608833,8609629,8610639,8611773,8612461,8613492,8614240,8615151,8616048,8616957,8617957,8618720,8619512,8620474,8621480,8622474,8623572,8624530,8625686,8626724,8627678,8628586,8629245,8630005,8630724,8631774,8632675,8633651,8634319,8635031,8635610,8636428,8637121,8638137,8638968,8639612,8640416,8641099,8641574,8642377,8643137,8643860,8644973,8646108,8647203,8648101,8649178,8649869,8650497,8651011,8651943,8652887,8654043,8655134,8656330,8657114,8657909,8659183,8660179,8661162,8662206,8663102,8663881,8665025,8665916,8666686,8667542,8668568,8669662,8670564,8671650,8672741,8673679,8674830,8675759,8676802,8678050,8678917,8679780,8680493,8681447,8682175,8682900,8683343,8684115,8684931,8685891,8686837,8687740,8688934,8690094,8690910,8691753,8692467,8693647,8694452,8695308,8696463,8697467,8698436,8699018,8699999,8700949,8702156,8703003,8703718,8704552,8705829,8706911,8707720,8708525,8709646,8710585,8711579,8712763,8713702,8714624,8715490,8716216,8717016,8718227,8719037,8720028,8720802,8721686,8722373,8723115,8723992,8724933,8725636,8726351,8727377,8728284,8729098,8730019,8730983,8731971,8732769,8733646,8734470,8735347,8736265,8737221,8738210,8739476,8740528,8741716,8742795,8743519,8744424,8745412,8746515,8747582,8748200,8748913,8749567,8750529,8751545,8752403,8753276,8754341,8755358,8756553,8757459,8758347,8759267,8760358,8761426,8762107,8763250,8764123,8764775,8765454,8766457,8767300,8768194,8768969,8770136,8771006,8771711,8772392,8773404,8774197,8774862,8775780,8776581,8777200,8777954,8778685,8779654,8780497,8781216,8781832,8782439,8783268,8783976,8784775,8785768,8786690,8787610,8788436,8789331,8790481,8791635,8792862,8794014,8795050,8795964,8797125,8798283,8799682,8800978,8802088,8803360,8804311,8805352,8806547,8807937,8809210,8810506,8811411,8812250,8813060,8813906,8814864,8815344,8815967,8816317,8816759,8817332,8817909,8818501,8819178,8819876,8820546,8821281,8822063,8822785,8823323,8824101,
8824836,8825432,8826356,8827075,8827590,8828193,8828691,8829477,8830041,8830530,8830935,8831477,8831973,8832556,8833005,8833534,8834105,8834706,8835467,8835883,8836436,8836823,8837436,8837909,8838595,8839131,8839761,8840302,8840775,8841416,8841917,8842360,8842961,8843571,8844204,8844934,8845484,8846299,8846942,8847682,8848561,8849164,8849959,8850820,8851789,8853078,8854266,8855230,8856142,8857254,8857811,8858725,8859384,8860091,8860765,8861504,8862208,8863073,8864182,8865289,8866295,8866973,8867842,8868766,8869766,8870700,8871747,8872749,8873448,8874630,8875794,8876477,8877327,8878499,8879483,8880391,8881352,8882288,8883135,8884051,8884488,8885156,8886146,8886876,8887854,8888956,8889807,8891017,8892169,8893109,8894031,8894827,8895604,8896564,8897310,8898066,8899278,8900286,8901354,8902365,8903367,8904317,8905128,8905920,8906731,8907959,8908552,8909082,8910054,8911158,8912203,8913137,8914161,8915277,8916069,8916629,8917670,8918976,8919898,8920489,8921174,8922338,8922832,8923829,8924631,8925575,8926350,8927163,8928011,8929180,8930026,8931157,8932130,8932927,8933952,8935062,8935926,8936623,8937597,8938607,8939856,8940847,8941906,8942885,8943800,8944662,8945658,8946771,8948026,8949011,8949860,8950690,8951529,8952308,8953010,8953994,8954783,8955908,8957176,8958556,8959661,8960160,8960822,8961577,8962857,8963841,8965118,8966019,8966732,8967573,8968320,8969195,8970323,8971197,8971932,8972759,8973577,8974547,8975475,8976175,8976998,8978167,8979049,8979803,8980727,8981631,8982428,8983565,8984353,8984972,8985804,8986795,8987901,8988884,8989727,8990448,8991110,8991696,8992526,8993425,8994233,8994911,8995482,8996304,8996965,8997625,8998642,8999368,9000244,9001160,9002522,9003834,9004622,9005094,9005889,9006681,9007287,9007870,9008596,9009474,9010422,9011475,9012490,9013454,9014214,9014885,9015480,9016691,9017630,9018679,9019790,9020761,9021745,9022683,9023482,9024188,9025596,9026422,9027524,9028226,9028728,9029802,9031166,9032354,9034392,9036440,9038488,9040536,9042577,9044634,9046576,9048588,9050386,9051950,9053913,9055961,9058009,9060057,9061217,9062304,9063330,9065317,9067365,9068752,9069413,9071217,9073265,9075279,9076456,9077643,9078834,9080303,9081409,9082697,9083279,9084554,9086058,9087084,9088332,9089321,9089852,9090971,9091960,9092247,9092529,9093159,9094443,9095745,9096779,9097804,9098880,9100169,9101265,9102173,9103041,9103752,9104837,9105602,9106256,9107291,9108333,9109573,9110551,9111530,9112128,9112927,9113898,9114765,9115760,9116694,9117459,9118577,9119637,9120889,9122059,9123001,9123675,9124382,9125385,9126509,9127403,9128282,9128893,9129490,9130266,9131344,9132277,9132902,9133530,9134184,9134989,9135774,9136565,9137514,9138395,9139362,9140068,9140800,9141455,9142429,9143383,9144647,9145757,9146844,9147440,9148374,9149449,9150357,9151603,9152215,9153281,9154162,9154809,9155361,9155916,9157108,9158252,9159351,9159951,9160547,9161187,9161959,9162814,9163654,9164364,9165352,9166197,9167013,9167937,9168883,9169619,9170488,9171644,9172517,9173537,9174563,9175578,9176335,9177229,9178122,9178989,9180196],sizes:[72,30,230,215,217,230,215,217,230,215,217,230,215,217,230,215,217,230,215,217,230,215,217,230,215,217,230,215,217,230,1213,1434,1168,994,948,890,1068,1004,1130,1104,921,1141,1015,1044,810,1033,1157,989,973,647,956,788,1102,1054,1112,1224,1099,956,869,927,1044,1119,917,846,1142,828,909,668,763,1082,1243,1132,1203,1321,1163,1064,1144,787,1025,1150,1139,876,1182,1065,1059,1214,1018,1173,1068,1075,971,1173,1199,1001,937,1149,1056,1202,1137,1126,1137,1128,1162,1066,858,1112,1126,1143,936,951
,1183,1057,1143,982,1113,1316,870,936,1039,724,1083,1080,1174,2048,1509,1344,1754,1887,1360,2048,2048,2048,2054,2047,2047,1875,2048,1606,1601,2048,2048,2057,1798,957,839,535,598,856,1118,1096,1305,1051,1022,1517,1589,1133,1232,1215,1357,1647,1452,1430,1421,1379,1570,1432,1505,1211,1548,894,937,654,881,820,1073,1067,516,521,755,953,1037,1077,1050,965,936,1009,1074,799,906,898,890,949,1060,996,906,926,1011,913,1025,1032,995,887,1061,1009,1015,1012,1004,1053,981,1023,930,964,920,965,1007,636,1294,1175,766,1437,1666,1127,904,730,1074,1702,1343,845,911,769,1629,1587,933,762,747,1680,1519,1007,914,756,1406,1475,1164,869,735,882,1718,1240,1048,827,821,1365,1696,1560,991,711,1167,1735,1480,1026,883,946,1773,1349,1437,1607,1121,1339,1503,921,1606,1320,898,1664,1051,1125,1576,882,1564,1293,918,1719,992,1051,967,1145,1019,1022,624,1304,935,1133,1092,916,786,765,1037,954,909,874,829,789,733,721,906,790,807,767,1067,1026,746,808,994,1137,1196,1135,980,963,819,1129,1096,1176,1084,929,980,807,920,1037,1029,530,673,716,1033,911,922,893,834,1025,1040,900,912,943,1127,991,947,914,734,1142,801,569,550,1140,810,894,1112,1007,812,1010,823,853,969,900,921,904,908,1118,884,1e3,989,825,971,1070,1188,1099,911,1029,1015,1077,1041,980,1277,1403,1130,1144,1288,1106,985,947,873,999,1243,889,626,879,1097,747,861,1152,1203,1189,982,1010,1023,1177,1064,690,855,795,780,656,1278,1223,1293,1426,1797,1826,1800,1901,1746,1767,1750,1812,1248,1181,1011,1139,1280,2048,1488,1307,1781,1780,1185,1211,1236,1004,2047,1931,2054,1926,2053,1930,2053,1594,1331,1770,1983,2039,1393,1447,1789,1700,1460,1798,1022,843,783,502,807,962,880,841,1205,1105,877,1067,960,791,934,1136,865,1083,763,867,947,891,874,1160,1347,1257,1070,997,1155,1090,759,750,811,784,735,884,865,856,859,1051,1453,1219,1194,1252,1270,1260,1269,1257,1454,1201,1356,1631,1035,1021,1030,1109,1050,875,1030,987,966,881,736,662,408,423,931,1013,935,1031,1180,1290,1365,1185,997,1335,999,1192,1301,955,805,856,959,1161,991,1632,2053,1760,1471,1931,1342,1558,1793,1341,1104,821,933,1144,1138,1062,1118,1096,1119,1183,1276,1008,958,724,808,576,898,780,731,767,593,944,1123,926,753,734,685,671,706,755,1129,823,748,690,932,954,569,709,789,1e3,1210,1129,1005,1098,1125,1067,931,891,1036,1047,978,861,856,885,881,875,909,1077,1182,1024,1127,694,1022,1358,1145,826,1171,1197,1053,917,932,873,1008,967,890,904,868,834,935,956,1026,1009,926,888,969,1006,943,870,823,820,945,974,980,987,899,931,878,950,970,925,979,1383,1489,1915,2048,1144,444,607,1043,1079,1104,1235,1174,1083,1086,1019,982,1195,1176,1815,1823,1818,1843,1847,1854,1837,1834,1734,1733,1731,1502,1191,948,685,798,757,843,623,666,662,831,1056,1187,1214,1744,1365,1964,889,913,675,989,717,753,708,1110,793,1027,666,1339,1169,1288,1343,1930,2048,2038,1510,1330,1152,1195,1413,961,806,887,905,823,630,810,698,724,688,673,605,553,656,868,558,684,646,694,640,514,759,893,669,543,566,606,993,865,1e3,679,541,1463,1325,1203,1145,1293,1361,1182,861,930,830,1023,785,947,560,813,636,1017,1192,1038,964,968,949,791,827,746,841,728,976,933,754,673,845,871,689,735,627,696,769,789,727,867,727,675,394,565,918,629,621,949,947,1072,490,660,940,682,945,866,694,678,739,825,577,653,770,960,933,1359,1419,1386,1054,842,677,672,636,892,1298,1050,977,964,908,795,710,795,892,767,835,880,838,810,576,718,593,589,705,806,921,868,1026,1074,1111,1013,1164,958,759,1135,934,715,786,785,759,827,786,766,724,976,899,656,965,950,1001,904,957,926,635,694,757,865,924,1021,931,947,867,847,931,679,682,599,1026,729,779,920,804,1043,870,1015,854,828,715,916,731,945,905,860,761,899,831,105
4,1018,461,454,737,1189,1233,681,756,1063,785,650,708,626,681,468,433,443,658,613,698,569,815,1040,987,728,876,951,680,1005,616,580,1307,908,1179,1139,1152,1210,1064,859,564,403,882,1098,799,1242,798,700,815,869,570,621,665,978,770,716,723,804,840,775,829,1278,1185,1026,859,963,895,901,863,964,1018,1221,714,768,865,728,895,665,518,721,479,488,730,990,832,657,504,922,693,719,739,750,816,670,723,888,737,947,921,1237,1126,731,1134,1116,1293,1065,1061,911,1243,1060,986,764,873,1115,947,1073,966,873,933,1153,521,614,705,638,607,651,793,875,689,804,609,778,440,926,826,895,730,420,563,1025,834,1008,763,952,902,714,855,1126,1043,1357,1062,955,621,969,1239,1025,961,1205,1303,1298,1139,1094,1126,1031,1071,1135,1124,1271,816,549,1020,797,580,584,892,1017,780,672,1275,995,1536,1378,715,681,860,619,1104,989,913,927,1194,778,908,1075,894,884,750,1062,712,782,842,684,1299,1054,943,943,1001,975,569,417,1031,1051,1142,1155,575,755,747,878,634,1e3,552,363,583,909,900,673,1071,636,617,692,766,851,1113,1229,1248,1176,1045,1161,1171,1194,992,1067,1104,1061,1359,1148,995,692,959,1123,1043,831,850,698,884,827,890,784,682,840,682,821,725,606,858,811,879,961,889,918,660,845,722,998,983,714,873,733,955,1049,998,678,708,901,1149,744,944,777,924,820,834,761,1003,671,424,635,640,506,592,505,535,671,714,743,923,734,533,866,736,958,627,723,951,1262,1220,869,928,822,990,909,1168,932,1031,882,937,914,977,1090,928,787,817,995,911,879,1054,657,999,777,901,1011,828,973,1201,866,967,1140,787,1272,1256,1050,1119,1121,685,596,1233,778,883,475,938,933,982,867,678,765,673,818,715,574,752,768,607,1085,657,694,907,730,860,966,942,764,934,1099,808,793,954,805,1055,711,974,1169,852,958,1382,1505,695,641,776,753,514,599,998,941,927,1017,982,931,934,782,675,762,1311,1502,1205,1319,946,1158,1094,955,641,1210,747,718,941,678,605,647,654,592,661,781,891,877,1103,842,878,806,989,912,655,816,889,891,880,1028,983,460,613,886,525,795,981,745,656,904,1036,796,1234,1e3,791,884,763,930,722,1179,902,802,826,1243,1145,1218,848,916,1112,1021,1196,1048,1012,1166,1125,1125,950,1047,777,853,1001,1024,1073,1057,775,544,449,519,963,1041,1025,855,936,1152,873,985,821,917,840,783,689,1170,1122,1164,1285,1131,666,1050,1072,904,930,603,823,784,890,1003,1068,1330,1198,1163,1327,1295,1277,1035,1289,1105,1156,1111,1076,1057,866,1077,894,995,1289,1224,869,999,905,651,493,671,733,877,1149,965,1102,1065,841,971,1183,858,1341,987,955,1235,842,1202,795,869,902,1132,1095,1101,1020,1097,681,637,654,726,783,926,783,740,830,816,800,754,1061,845,931,810,982,867,777,850,637,758,781,854,1332,1071,881,886,699,870,749,705,770,975,763,631,1076,983,724,859,854,1103,415,725,776,822,743,1495,1371,1271,999,1043,1079,506,816,866,1039,963,1007,740,814,1013,478,343,632,665,808,849,913,958,826,824,755,870,1068,1516,1054,870,1001,876,838,943,1048,1019,785,855,1099,986,1328,906,756,718,1063,999,826,1009,942,1015,851,798,616,776,950,854,1106,1371,1055,1093,1015,972,802,866,811,777,741,812,827,778,948,698,1085,639,999,981,790,837,454,812,1025,854,864,944,1084,726,1087,1058,858,1056,690,1058,869,1081,949,1249,741,1064,711,819,818,1107,1001,1234,1073,1306,1071,1087,1121,455,353,515,1113,989,864,980,1042,934,695,898,1079,1016,1079,1054,703,934,742,943,1017,946,1033,973,867,943,947,1093,785,979,796,413,852,814,919,921,914,1082,663,592,858,1001,1232,1011,1069,1156,1133,1250,804,1164,875,672,901,916,934,1186,1116,1044,1056,978,1061,740,1102,717,873,518,759,1088,1017,888,855,589,730,1102,788,901,1012,915,1008,929,865,774,881,846,777,767,863,931,1082,1027,1202,795,918,842,946,1033,1173,820,884,8
68,831,494,463,578,978,746,758,952,762,779,1051,1271,764,720,791,817,949,993,832,849,843,772,639,795,517,705,937,777,892,911,916,925,740,911,902,764,908,721,894,934,1071,770,1005,907,688,823,1072,953,779,966,1067,879,594,736,956,775,742,541,716,692,742,858,923,855,829,1122,316,404,474,653,995,860,917,1003,717,897,818,734,820,962,742,827,668,837,815,810,683,842,706,529,803,1008,840,917,922,1060,1155,1165,715,1030,1150,884,1007,929,850,842,752,839,841,801,859,887,796,766,992,910,683,726,643,800,652,1063,725,740,974,779,1280,1219,798,646,864,812,763,640,719,664,867,633,831,840,778,791,765,937,862,939,1256,835,622,1276,839,973,941,953,670,663,668,935,909,896,763,1004,726,640,790,888,958,1046,770,913,896,819,763,845,908,888,796,985,836,1009,759,705,954,725,833,669,800,956,869,773,852,757,972,996,976,743,902,777,719,635,1091,805,655,1131,1287,969,1062,1318,1074,798,1087,872,1012,1171,1125,742,1045,735,1204,1296,1421,935,816,745,1202,1124,710,1060,878,912,625,878,969,841,753,965,756,850,1058,674,701,767,552,609,1048,960,817,1061,752,826,1010,907,553,962,787,897,896,495,513,394,390,725,633,732,803,756,756,687,656,573,950,770,914,465,489,618,401,461,463,467,545,477,455,811,716,902,1027,813,792,749,893,896,737,881,798,883,918,777,621,868,970,602,940,856,1112,937,545,859,902,776,735,861,699,937,932,942,588,1121,1052,854,945,1259,1227,1056,1025,1101,1156,916,891,958,857,993,1020,833,975,783,961,756,738,803,889,623,854,771,1145,948,1069,1106,1221,1060,702,675,862,959,912,937,1010,900,900,895,890,801,552,1061,826,870,1022,1002,805,683,786,833,989,863,800,1009,978,913,736,850,871,1041,1095,899,936,780,956,1048,841,1042,1095,963,948,849,887,658,707,629,714,999,1122,393,699,510,974,899,874,950,718,1122,1013,929,786,768,1206,944,785,1049,1078,962,675,918,1152,887,952,1003,830,1044,1037,770,1130,1079,1099,855,658,1162,1111,967,736,1025,1167,1219,1003,867,1271,1170,1212,1180,1178,921,1406,924,959,1022,830,741,797,1148,1003,923,778,1019,797,899,1061,1218,895,931,876,819,797,1100,934,1112,1044,1163,851,709,846,711,815,871,719,678,627,449,850,760,792,1048,904,895,911,991,1107,916,1054,1101,789,916,441,419,497,348,562,305,798,473,427,501,432,522,564,563,410,552,409,542,597,556,375,535,423,507,403,721,613,580,632,347,333,365,342,337,549,656,611,451,667,481,487,610,473,495,476,627,485,520,486,974,769,848,759,854,648,781,1155,982,754,1020,996,1061,1122,750,661,626,626,609,583,697,601,683,590,843,929,1162,904,1230,749,1117,783,1115,1168,1146,980,1338,1054,1082,1083,809,848,826,961,911,924,1282,917,860,744,1047,749,581,1001,1238,1130,911,996,751,705,1060,825,941,977,1187,1288,828,978,1049,1185,1229,1321,1099,884,1193,1266,1222,1451,1196,1142,1149,1098,1331,1184,1066,1142,1103,915,805,811,781,598,602,753,772,896,811,1052,463,469,445,1045,725,947,1019,1054,1013,1060,1105,1065,1073,858,768,999,1049,816,820,884,736,484,1100,783,765,664,726,450,863,899,736,800,686,633,719,793,447,745,757,1115,1026,1127,851,1047,1251,905,991,847,870,858,793,1151,1392,1091,1097,1097,1067,1031,972,1108,1227,935,836,830,982,1004,676,800,611,747,850,1024,958,1070,958,980,838,884,664,771,945,1120,1053,967,1142,1080,1139,878,1601,848,823,714,1104,1036,1104,884,802,550,948,1001,1008,1090,954,891,867,634,515,1024,725,1018,832,994,811,1070,916,776,1096,1043,1078,856,1102,1129,1027,862,972,929,787,773,764,1168,550,1390,1159,969,688,890,918,883,909,1036,1050,1065,944,1056,999,1111,886,1070,1113,934,1079,1157,1204,682,1422,1336,1222,902,773,950,1022,846,597,887,1233,800,1019,959,949,989,1088,848,571,906,993,1036,734,808,833,743,953,1018,1224,955,695,949
,1047,771,934,894,1105,1064,1247,1033,775,810,973,814,1013,771,874,1099,963,755,878,922,674,616,460,435,524,526,764,826,944,868,960,1051,777,737,728,500,1086,946,602,482,531,665,387,328,351,624,993,1086,801,1014,1081,1073,1258,1025,781,499,570,383,899,1057,942,852,782,748,1130,975,762,641,918,697,1071,807,695,527,801,648,842,995,1041,903,1069,1244,1514,937,660,653,727,450,693,835,880,532,847,1057,922,883,631,970,992,896,742,873,597,802,1085,1100,1120,1132,1070,1239,914,1038,1040,747,1044,943,854,782,992,964,933,1028,1078,1009,876,869,1122,800,1123,962,841,815,923,1127,1126,1206,934,1057,675,661,741,1052,1058,662,710,996,807,971,957,853,877,911,462,709,465,837,866,686,1245,975,991,786,970,952,952,877,919,968,956,366,665,910,893,1001,605,551,823,657,1027,1005,1093,865,1002,1009,801,823,1036,904,1303,971,716,1130,1268,1215,1115,893,805,1318,1077,928,544,857,1075,1246,1324,957,1145,1180,1252,1230,1364,1378,1387,1404,1328,1254,1294,992,1251,939,1223,953,851,1244,863,850,836,919,1191,1032,646,1217,1109,911,893,746,834,1108,753,947,1146,1042,1200,1385,1041,1431,1414,1613,1152,1065,1391,1315,1191,1289,1089,1199,1389,954,1209,970,1080,1272,1413,1225,1029,932,782,703,928,901,752,715,883,710,1004,607,821,661,736,864,1046,764,938,1073,588,1028,920,1213,1145,1367,839,563,590,714,608,609,679,744,724,750,748,721,649,770,580,644,567,577,585,522,1186,1146,812,513,1021,649,958,1192,1040,1152,968,1808,1469,1685,1104,1072,983,1021,522,432,433,910,762,585,464,498,554,936,816,1191,1091,1314,903,878,780,972,779,825,1129,1197,752,846,1176,903,1004,858,921,918,717,909,1007,1119,1110,449,622,796,819,1036,1044,923,590,892,1133,929,1019,970,338,961,845,1124,930,816,983,1071,1137,1211,1125,959,915,904,1130,970,993,1013,910,743,548,801,1085,868,1430,1e3,1163,1221,932,1205,1356,1108,787,877,943,893,902,774,1059,1145,1245,735,820,1205,1064,1034,1211,1383,1292,727,663,1299,1064,976,1073,910,977,806,948,1050,554,1029,1200,1117,1103,1002,973,841,883,805,535,792,1081,1190,929,1056,821,773,1069,1041,945,801,1019,970,964,1093,855,808,901,925,1087,928,1150,932,985,1016,1132,1201,1513,1009,1035,896,1052,889,707,1016,806,536,1097,1019,902,687,782,646,759,642,1020,1132,859,752,1004,739,1117,1004,721,1113,974,995,869,1077,1003,966,899,966,1051,949,797,763,810,1253,1224,1172,768,837,1045,885,868,989,1198,759,638,906,1027,994,1001,840,913,1012,885,536,1149,976,1059,1020,1201,1106,1009,971,1128,1180,1032,778,987,1066,740,853,1007,1231,1089,839,544,900,941,1291,1208,880,905,788,1236,925,709,914,857,949,1251,622,696,1017,975,863,454,745,877,1242,1279,1188,1013,1287,847,998,820,898,895,852,1148,1220,1045,930,1028,765,992,897,877,959,957,904,1222,906,1251,881,1153,1128,1153,1279,1027,963,888,787,945,1341,1128,940,610,930,940,1048,497,705,946,962,1031,853,1070,771,1093,760,718,849,963,498,731,870,878,884,639,654,972,868,559,559,821,946,844,837,939,816,979,763,969,939,958,748,762,1049,913,1012,627,529,351,583,700,533,706,800,460,878,753,298,580,726,967,781,680,778,809,663,619,1046,990,895,911,823,1202,1042,939,1112,1095,894,620,698,770,862,1e3,745,720,515,1045,667,648,525,1299,1273,877,941,780,795,1006,843,666,1035,913,826,1137,770,881,973,1014,1164,1030,1010,917,893,518,757,1008,1047,1083,1088,940,1071,862,1127,1147,1129,963,888,846,899,888,879,1057,860,1304,1216,1167,1027,950,834,878,966,912,1060,874,1008,952,946,703,760,1116,822,1046,915,937,1168,908,757,1051,1198,965,866,951,900,720,676,1019,942,1096,1194,1253,1099,1165,1203,1200,1192,1361,1130,969,885,724,829,655,606,647,665,912,985,710,726,878,824,666,921,738,739,720,668,361,614,608,69
9,817,617,729,506,540,517,635,581,684,695,413,472,679,609,813,577,442,314,563,768,761,618,677,625,581,516,693,713,864,879,683,881,1289,892,923,640,867,1133,1049,873,855,703,889,757,927,974,950,1100,767,892,900,949,1091,1326,1465,628,709,1413,1041,923,842,775,1048,1029,1060,985,1085,965,831,853,832,954,912,910,943,828,745,845,998,1106,994,1062,877,724,810,958,700,902,858,889,1002,800,867,1017,860,871,934,660,580,721,952,1116,1011,1375,1227,1122,1361,1229,1227,879,673,1153,771,663,744,915,1244,934,973,1182,1404,1285,986,1010,809,1128,1169,558,1094,1239,908,1378,1054,859,752,922,860,567,1019,708,571,1038,990,1064,1372,945,1179,803,813,944,1313,1064,1065,949,1163,1218,1200,1290,1173,1251,1173,1021,1125,1024,1043,852,925,861,962,930,1027,1132,1316,1159,1165,1058,1153,1062,1194,1201,1030,556,588,622,717,546,629,587,914,613,885,682,500,741,672,1178,812,896,788,1156,982,989,797,1152,998,1156,591,557,697,517,843,1047,943,982,657,1001,1082,929,939,820,791,674,843,1105,822,931,967,967,897,752,1032,935,1260,1079,991,1071,1330,1416,1502,1277,1178,1026,1053,860,1022,841,1016,1143,998,564,665,776,1070,1145,1215,892,1115,805,851,721,805,644,536,1083,963,700,556,763,817,639,945,855,548,571,867,540,458,809,474,496,891,619,609,758,688,656,829,913,735,1041,771,1544,1567,1554,1603,1606,1596,1601,1573,1597,1571,1603,1591,1580,1573,1610,1603,1547,1154,1014,1020,773,692,737,880,745,777,712,1064,534,676,991,819,880,883,1194,1328,1265,775,1125,1207,1069,845,780,1046,877,1071,706,976,593,861,952,835,741,650,778,744,574,949,697,921,844,1146,1227,1152,1217,1103,1151,1204,1115,970,1048,1141,1306,1121,1022,868,913,564,1498,1405,1090,1313,958,669,543,675,789,895,776,758,1316,1109,893,1125,966,925,1048,909,813,715,964,596,488,614,1116,587,1097,735,799,935,1299,1018,957,715,829,907,808,622,1181,863,795,963,700,691,611,564,686,586,1076,1122,703,964,549,584,1071,916,1049,991,828,845,890,1055,969,908,1033,946,1035,938,1001,1222,1025,967,892,754,1157,1156,1008,614,663,810,673,678,607,867,772,868,902,972,790,847,830,1057,1070,629,556,659,529,908,758,997,562,763,1054,891,1053,726,859,893,1025,1178,1084,743,933,1100,788,1139,880,935,973,898,885,1050,1162,939,1074,808,886,746,665,1104,929,783,737,883,908,1068,1075,974,1039,1097,666,829,814,767,896,1043,1113,1217,1267,913,557,716,799,484,840,860,884,848,1134,736,714,837,1111,616,952,1283,1174,1e3,975,1185,1282,959,1363,1190,1023,930,992,1221,1086,699,1009,991,807,1074,773,770,792,1202,893,992,1294,657,912,564,1215,942,1161,1338,962,693,523,616,765,780,626,729,828,703,742,828,606,681,933,1070,862,833,1003,1234,1112,743,986,579,627,574,614,680,780,946,808,769,881,717,1198,972,1047,968,776,701,777,1008,860,1004,600,916,515,766,764,854,858,965,896,909,848,1109,970,1009,1071,826,784,1309,1010,1203,1029,810,821,755,739,770,1142,1018,901,1033,1045,1103,722,1151,1038,1085,1169,1445,1162,1291,1205,1090,1178,953,1074,1049,1306,1087,761,1116,959,977,1084,979,1005,906,1086,876,1060,1029,1034,1262,993,1059,1058,786,1014,1260,956,1062,813,967,1172,751,1023,692,1120,1171,1210,1220,1095,788,930,923,1205,1039,1089,1273,1112,1191,611,636,1186,1156,1186,925,1077,959,934,906,1145,840,816,771,749,1073,971,1055,823,976,796,966,921,996,1130,1377,1133,1197,1134,1243,573,550,608,437,502,431,719,813,797,1046,652,690,634,956,790,1125,896,1120,979,876,776,996,845,1004,557,983,622,515,534,582,520,688,788,823,676,715,541,487,551,507,469,526,470,665,799,1073,1027,840,771,1106,897,1017,802,1057,923,818,973,816,961,1155,711,705,718,942,868,772,971,991,784,1169,1122,883,674,1066,676,924,984,861,794,969,840,726,828,8
15,802,851,834,800,845,1010,1086,873,1110,935,974,899,918,1233,1113,879,904,924,749,986,683,818,1009,1029,861,870,870,720,907,825,768,1098,1022,1103,808,1008,858,774,776,978,987,1014,770,770,849,862,822,1134,999,1226,679,772,935,1151,925,773,737,1127,851,940,860,697,1239,1161,1048,1051,948,796,1131,711,1002,705,1226,1243,1212,806,1312,1247,1532,1409,362,384,373,353,622,1056,834,847,642,624,872,736,1052,884,1053,1130,1116,1127,970,1380,1257,1168,1e3,1038,1081,1098,1136,984,996,928,436,763,614,631,658,754,1030,886,1379,787,1052,943,719,917,986,805,849,914,963,801,863,898,1087,859,974,1009,893,984,1052,914,704,1021,892,848,1112,902,1119,858,1143,1054,635,899,832,760,845,1174,1109,533,925,474,1049,860,600,538,754,971,1023,633,702,818,961,739,674,602,792,945,770,1086,904,792,1002,849,1081,836,712,1021,577,269,260,232,231,190,150,757,976,884,839,716,850,722,1110,1114,1078,793,1314,1174,1205,961,928,857,845,941,820,977,774,1513,1374,1155,1244,1098,891,1447,531,1010,1124,759,1231,895,1095,1181,1240,979,1170,834,929,1268,1055,959,1210,1243,1096,1183,810,902,1037,1147,1198,1216,1135,862,972,1039,1038,1288,1140,1256,979,1174,1080,1008,963,798,786,1164,1108,828,1146,845,1055,1082,775,958,961,1100,1212,968,808,1094,1016,799,1040,801,1406,1041,870,957,834,748,874,839,817,1011,1544,702,761,857,893,851,728,768,437,897,1173,1180,1241,1130,846,733,1001,1052,1185,1142,1210,927,1145,1329,1288,1150,1200,951,1254,1049,965,1143,1179,1204,789,938,1278,748,810,427,738,887,815,1027,932,618,1056,799,1079,812,801,619,856,762,796,678,691,696,791,757,647,927,522,790,731,740,1088,594,650,529,623,830,773,892,716,619,709,871,1015,925,642,1046,588,650,836,814,751,991,1060,930,818,1238,890,730,821,907,829,738,873,894,862,1263,682,1189,1232,1238,1269,1214,774,671,674,750,658,796,1262,967,945,1189,1203,940,1214,1068,845,1009,1062,961,755,725,668,918,1075,1140,1111,1162,765,931,1138,1207,885,547,1122,1080,915,1027,993,915,1042,811,764,1053,1050,734,922,1080,966,897,974,1278,987,1393,1201,1095,1047,1135,966,952,1091,905,981,711,1086,1050,680,623,1160,905,1186,588,992,647,1032,660,957,642,638,766,794,637,883,951,1053,976,932,1032,841,733,691,1091,809,1064,769,770,1023,935,907,761,999,562,1093,1189,974,979,1100,824,687,1066,1004,1057,1194,1037,711,907,801,1016,1073,1229,1096,1092,1111,782,945,781,949,1075,942,921,1003,691,704,903,1116,1116,935,736,486,938,779,634,582,841,729,768,508,508,600,931,1016,998,1220,1041,1167,969,1207,746,1162,1112,1055,1149,677,983,947,1045,1295,1268,1301,848,1055,1154,1066,958,715,975,717,830,998,1111,764,930,660,978,1049,1039,950,1024,962,917,1094,921,968,903,1052,1038,865,1052,862,1042,1149,809,858,845,900,772,922,948,702,1020,1118,1049,951,938,1099,1119,1113,959,1171,1210,893,1087,1054,982,996,1215,955,1106,1122,953,1089,1083,618,1114,821,968,1333,1225,1117,1203,1227,1248,1172,911,1034,1045,1179,1035,1131,1380,971,1179,859,1321,909,708,948,1184,944,577,1019,927,901,751,622,673,718,568,671,640,618,477,656,812,830,957,793,1145,1042,742,1315,1185,720,1010,713,1070,1049,966,1182,1152,1007,1317,1138,1082,926,1226,900,694,927,995,832,1002,1186,888,1159,1281,1203,829,1180,576,1020,1275,1380,1044,624,1101,1151,1107,967,1126,1143,933,743,1058,1146,958,1071,1038,929,1030,850,1230,1024,1103,1029,976,1110,901,917,783,1017,1171,807,952,987,971,871,977,909,881,845,708,772,979,885,1464,1243,1076,1072,910,531,1044,1211,734,841,828,864,645,899,942,1328,1142,923,780,1051,1084,996,1056,1173,1105,911,863,1095,682,1145,839,948,935,589,602,777,957,1206,1160,1150,1011,955,1040,1035,865,1035,1041,1186,1266,1242,830,1180,978
,1076,1050,953,634,1092,1023,958,1005,996,1175,1184,860,652,891,829,1228,926,727,984,931,947,683,1141,966,1307,1261,1013,462,1110,911,1098,1194,1184,1036,1140,1021,1012,1151,873,1221,956,1068,715,1087,969,1210,1137,1336,1326,1265,1125,1062,1105,1012,955,1129,804,865,728,879,782,876,1092,925,746,1037,1220,1004,800,765,770,667,712,890,808,908,902,1041,768,573,850,1083,678,912,1010,1038,986,1194,959,1231,987,1103,1094,1047,838,687,1001,950,998,964,1125,989,1228,848,996,941,789,855,984,950,962,1153,810,655,627,551,848,1138,941,891,986,885,1045,1065,1058,742,790,1174,924,928,780,998,940,961,861,1097,923,892,817,863,802,757,911,1015,742,880,1155,1445,1031,843,896,955,1218,1322,1393,1299,1105,1215,955,958,1019,938,880,1053,1122,1066,931,929,1154,975,805,948,1246,1167,1126,1113,1008,899,1036,1087,1129,1086,767,645,845,729,1091,999,1326,1331,821,984,1022,1035,1186,1069,1093,977,875,1014,1308,1036,1112,678,762,1027,1235,1112,1161,1152,1029,715,950,1012,1001,773,949,861,971,694,1466,757,847,864,750,738,1225,960,1030,945,1152,1183,1094,930,395,1117,880,1163,932,920,710,687,945,1155,1522,1298,962,1086,985,1342,1583,1390,1296,1044,1106,709,984,849,952,827,922,1178,718,856,989,924,748,1210,713,946,861,973,818,921,925,1059,676,729,880,836,1369,801,764,757,1158,1139,688,1006,782,1092,608,866,877,885,1031,710,744,921,697,631,780,1133,1234,769,1063,1249,1046,1126,1078,1059,1199,980,1229,859,1339,679,1268,1157,1163,1014,1009,1129,1070,1144,922,970,1331,998,953,1221,1009,1131,1205,969,904,906,1433,1286,965,1069,1061,945,1180,1186,892,1216,1219,1063,1021,849,977,1063,1050,1200,924,905,1339,1437,1153,992,960,836,819,1117,1086,842,1019,1134,981,1042,835,780,1094,866,573,556,1230,1072,964,716,791,975,1393,694,608,599,1076,1615,1579,1499,653,1114,768,700,1062,736,580,664,1148,1076,1141,1004,941,1089,970,1152,1143,892,988,1132,762,1213,916,864,1229,1031,1116,936,946,1071,955,825,995,1016,1034,1087,954,1111,535,549,473,604,917,1030,734,920,761,1043,913,805,778,922,1092,1233,1100,870,723,715,675,613,787,540,642,541,802,1069,1152,1017,800,380,1288,1141,1120,933,1156,1080,1042,1175,1014,1118,1219,997,1207,790,897,1010,979,892,1133,1052,764,1054,1195,1061,1123,1055,759,1157,452,970,1252,978,1250,886,1025,986,1109,844,1106,1173,842,1031,555,1111,991,741,833,669,977,1022,721,778,677,1111,862,815,873,1014,1098,1295,1120,797,1299,798,932,542,777,1164,769,877,903,776,1042,1002,939,904,971,667,1125,1119,1196,991,1186,963,1349,897,1056,1188,1158,1043,1002,923,1302,915,1121,917,822,1085,1096,1059,1068,930,912,672,665,955,1211,1119,1132,1040,1178,1030,986,846,818,924,1021,1070,856,990,622,673,983,917,944,804,1038,696,775,834,1190,1091,1041,1218,1263,1307,1203,991,1212,1045,1209,1180,962,1095,976,985,1109,1072,1162,1134,878,1243,1184,1114,1102,542,1067,993,1029,1093,1204,1257,1064,1203,1130,1005,1103,1119,1255,975,898,869,958,1230,1192,1027,1093,1212,1118,1180,1002,1058,1205,1060,998,913,840,840,1255,642,806,775,1094,729,721,1002,860,1032,1394,1246,783,1206,1359,993,768,554,875,630,602,744,826,753,679,877,833,660,652,828,837,1175,1315,942,554,620,914,718,1172,939,481,797,1138,1048,1104,1356,1154,665,772,1080,849,1109,1220,1019,1107,1297,720,1215,1011,1209,1046,1159,1104,788,605,1044,784,867,892,966,768,512,1051,839,762,767,649,891,839,1294,880,894,1003,901,932,1031,640,683,724,775,772,746,535,767,714,741,903,1207,931,1311,1054,1287,1107,1339,1158,930,979,1590,1451,1129,1067,971,789,742,780,1065,847,681,797,1043,859,603,863,859,893,917,833,841,803,989,1025,797,916,839,851,826,956,893,694,967,854,890,579,876,799,738,967,806,862,1001,
913,1089,834,685,854,750,732,862,608,1028,668,957,815,522,627,781,751,728,712,946,713,754,822,874,980,855,703,806,896,1044,995,945,870,1007,782,685,853,772,717,696,782,956,1203,1192,827,811,793,739,911,769,696,657,695,621,692,803,845,999,820,864,855,820,951,802,878,847,809,941,1174,1043,752,1070,1006,937,1173,1105,1207,1174,775,648,602,622,808,654,531,720,981,617,886,886,667,824,794,618,592,858,806,671,639,527,681,668,783,724,734,850,752,833,957,1025,938,924,1020,951,769,637,850,1065,1083,947,889,674,549,796,705,884,858,1004,1049,921,819,978,805,468,541,981,773,817,910,865,900,1249,1052,1115,1352,1018,1102,1127,965,755,550,904,1104,1174,1033,1110,974,1041,893,1211,908,1021,884,647,1303,945,677,711,689,904,715,886,1251,1360,1075,1137,1140,823,866,971,1110,1026,1059,955,983,1329,1054,1230,968,1043,1347,677,839,744,685,907,607,745,643,762,1099,850,603,742,743,1079,1017,763,1232,1336,1021,663,714,889,1098,1208,1160,1231,1006,994,1224,971,1012,1037,1e3,987,894,1105,1004,1138,1070,912,1141,1029,726,1020,1093,1096,879,984,990,1130,832,897,930,786,972,1199,1074,1097,1228,1245,1094,946,1253,977,752,1105,1026,1299,1369,1097,1105,1070,595,1087,1278,1043,1481,1075,1116,974,695,712,836,876,656,570,537,630,514,750,734,922,687,733,1054,768,694,640,924,569,571,835,922,738,1218,922,673,690,732,449,583,854,936,799,917,1146,1093,952,876,788,1027,1219,1221,816,932,783,1083,813,814,910,874,656,704,881,528,559,864,1190,991,732,1038,1192,1056,995,1085,1014,1344,1194,832,1085,1195,1142,1110,1075,1289,1211,1398,1305,1370,1417,1178,962,1083,1012,966,867,966,1044,723,1253,1390,1214,1247,1081,1143,1135,906,957,922,962,1124,911,835,936,1029,738,839,845,916,894,1019,1197,1021,824,870,852,1219,1240,981,750,787,871,506,923,1072,1275,645,481,798,709,870,688,1087,980,1465,1122,1062,1048,1185,989,941,792,1203,1026,1320,784,1067,1044,817,807,845,729,594,728,699,689,923,862,916,898,1196,1221,1093,1022,1260,1032,767,578,1059,1246,1063,810,954,1048,1096,854,815,683,1298,1295,1024,907,686,929,847,980,758,649,884,1042,740,786,485,743,787,767,917,986,803,821,1005,906,1015,850,727,571,514,686,999,903,861,976,629,701,912,1262,884,857,966,1174,935,589,952,997,884,855,682,961,736,373,678,651,1088,1049,1034,674,627,1023,1130,897,1014,568,524,759,537,588,716,937,954,1021,826,676,1096,959,1215,1129,1086,948,1057,1241,652,879,793,793,1047,1211,716,835,971,935,1248,1214,1315,1197,738,982,805,1237,988,887,889,1107,1056,744,1268,1026,1211,1158,1260,923,740,640,670,776,811,790,942,872,828,764,709,954,858,1137,891,804,894,871,658,853,847,880,1206,867,740,653,780,966,789,939,980,640,837,1106,1103,933,582,956,821,809,886,478,1009,977,980,971,806,873,923,671,821,872,1029,1078,1173,995,1082,900,716,1055,1109,752,1003,942,1084,883,1117,690,887,762,619,651,1085,696,798,943,1025,1313,727,778,1473,1019,948,1248,1158,1287,860,848,921,835,868,942,1031,991,1057,1100,935,919,1398,1214,1085,558,913,1196,1043,846,1011,1061,908,897,854,1176,1214,949,1034,1e3,1003,898,576,1489,1271,1685,1707,1690,1358,1742,1662,1713,630,304,142,25,95,90,94,25,95,90,94,25,95,90,94,25,95,90,94,25,95,90,94,25,95,90,94,25,95,90,94,25,95,90,94,25,95,90,94,25,95,90,94,25,995,1699,1685,1703,1417,1722,1669,1412,1405,1722,1669,1406,259,1390,1722,1669,1368,137,283,95,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,502,95,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,505,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,212,423,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,216,1685,1707,1690,1328,1742,1662,1713,1370,1699,1685,1703,1394,1722,1669,1368
,685,1742,1662,1713,1358,1699,1685,1703,271,1685,1707,1690,1025,299,1059,1699,1685,1703,721,1742,1662,1713,667,1685,1707,1690,1026,1408,1722,1669,1376,1052,1699,1685,1703,679,1742,1662,1713,723,1685,1707,1690,977,1014,1067,1001,1207,1428,1662,2048,2048,2026,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2054,2048,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2053,2048,2032,1604,2048,2048,1738,1940,2048,1934,1737,2048,2048,2048,2048,2048,2048,1720,1817,1905,1952,1541,1708,1589,1726,1800,1360,1614,1708,1745,1563,1444,1087,725,1108,944,835,824,910,594,588,636,892,905,633,658,697,606,579,359,384,635,852,650,886,759,629,840,610,664,676,631,481,454,364,530,581,561,738,650,693,752,668,865,848,870,860,685,467,506,747,849,586,585,706,845,735,734,728,561,523,446,356,499,559,647,618,644,686,859,512,528,458,1145,753,858,589,703,579,523,611,1257,962,748,890,771,813,876,541,626,562,748,535,571,767,763,719,730,847,820,526,937,716,750,687,664,766,424,565,742,722,1130,652,728,738,702,1083,658,723,1009,979,806,700,679,831,669,602,698,700,842,515,647,887,788,1191,693,804,631,622,701,597,623,597,675,721,779,501,632,719,787,815,1114,633,746,639,643,686,560,608,534,654,682,681,506,623,616,824,657,993,1018,1186,968,1080,1080,920,871,823,1171,1038,1076,953,840,846,889,608,456,511,734,875,621,872,560,595,458,533,744,807,649,876,437,685,376,483,316,593,541,620,854,568,564,556,530,379,393,408,439,404,847,883,662,749,664,584,691,537,509,839,1304,984,698,865,730,864,734,802,479,451,479,535,526,556,803,756,712,685,840,677,596,565,623,493,600,690,1039,894,590,641,788,751,859,733,718,941,825,996,1008,786,659,677,788,638,615,650,681,737,722,674,770,550,1131,820,729,636,639,723,636,573,644,655,700,700,469,767,1277,689,747,622,622,694,588,611,521,652,706,687,633,692,726,1279,637,716,623,623,683,578,564,530,629,619,647,694,628,661,722,1176,1022,1067,1073,956,1067,853,785,804,1158,823,744,878,812,850,939,787,709,613,798,652,790,555,882,1380,1403,1133,459,989,986,963,999,1133,929,689,819,713,773,732,682,674,622,1183,1098,844,962,841,638,722,779,753,747,691,669,1217,1296,932,923,747,779,545,656,635,616,550,687,691,521,677,686,839,617,860,708,834,512,426,502,667,857,564,554,556,637,459,528,804,694,632,692,568,541,501,454,346,480,521,485,561,828,479,459,495,649,1195,758,792,557,671,1146,832,995,1193,950,874,809,1043,971,1141,881,768,699,952,897,1043,1058,899,825,1029,954,1099,955,901,775,962,797,1173,932,1107,861,674,738,746,598,784,708,597,696,716,811,646,643,1147,526,738,1048,691,653,879,1060,813,500,432,634,544,461,577,480,613,610,749,1087,778,736,541,747,586,543,804,1133,1047,1015,790,754,904,620,721,736,789,701,629,779,573,629,807,530,755,466,407,472,468,1030,935,885,914,1150,714,694,781,846,754,750,681,632,804,619,496,734,1164,670,795,633,775,728,737,758,668,666,675,668,498,474,839,1061,665,667,568,743,586,879,1053,559,625,574,564,577,589,599,1056,927,766,753,800,754,826,924,577,707,745,597,650,1210,820,904,782,863,768,813,793,552,443,489,489,427,485,573,712,691,1085,1052,657,802,772,533,1081,829,770,795,551,719,601,522,617,1081,1086,1039,1133,777,933,815,785,664,898,920,670,395,346,755,760,655,837,694,674,838,1207,801,701,883,651,801,767,721,599,780,1025,1049,751,1226,540,809,761,810,335,453,385,43
8,472,889,564,461,414,710,490,473,500,375,375,382,779,770,614,866,344,523,448,368,272,265,435,357,395,363,369,280,576,845,295,441,399,438,863,689,355,358,344,352,591,671,792,508,595,656,537,512,827,1312,965,624,500,658,833,846,748,756,381,304,363,295,519,527,556,810,749,707,679,818,675,601,587,649,470,580,586,997,1003,401,450,384,413,572,661,693,655,650,685,636,649,1003,775,1154,779,699,663,723,549,639,572,680,666,664,712,662,672,462,527,1216,717,693,637,620,612,550,609,593,631,592,752,535,629,1051,990,690,710,609,629,670,576,545,668,680,678,672,653,780,942,1005,618,674,633,631,677,574,575,500,630,673,664,620,617,717,964,978,1008,880,982,898,920,888,658,637,957,976,676,785,818,840,940,838,755,693,636,807,711,746,551,759,1555,1569,1629,1626,1016,923,917,917,913,927,931,1155,795,725,851,676,742,749,666,667,657,1233,977,698,764,647,685,602,635,544,647,602,620,558,448,641,1039,1316,1086,1001,914,767,745,612,581,500,649,614,550,688,680,515,599,714,999,528,705,735,709,583,414,375,327,415,676,771,521,468,446,488,331,381,447,448,729,701,589,783,371,486,458,360,246,249,253,498,340,385,361,276,394,523,655,703,371,380,448,258,628,647,1015,677,714,562,792,592,642,800,1049,835,1009,1166,873,793,1014,1025,850,1133,823,791,808,973,841,1202,950,754,909,1057,866,1153,878,795,877,971,825,1149,775,1065,856,676,564,635,711,618,730,693,548,754,670,808,597,589,874,718,382,625,753,929,384,571,618,1149,687,598,353,388,573,469,404,361,503,465,521,557,588,999,716,376,361,378,414,668,592,760,753,518,602,695,554,577,982,982,942,980,817,798,883,632,695,741,721,701,615,773,658,574,790,589,769,560,400,474,482,931,971,895,833,1094,820,826,725,859,734,765,693,705,707,725,502,617,796,1043,671,681,761,818,676,820,679,654,617,713,697,489,624,620,1100,583,403,445,421,578,446,554,630,660,1073,785,444,626,583,580,602,588,617,1158,798,818,740,562,668,427,421,1059,610,356,412,527,544,531,614,631,1018,1065,751,739,787,815,708,610,741,352,324,367,413,384,312,346,413,510,614,610,901,1297,626,726,810,625,691,1055,406,351,412,354,599,783,775,380,570,676,540,518,816,1297,1051,778,846,744,804,829,616,523,548,531,548,778,730,715,729,846,757,698,584,609,498,679,716,1307,990,926,824,746,928,868,725,1151,892,566,864,1218,1293,596,622,609,755,703,1246,1506,1489,1559,1428,1312,1297,1023,852,882,1135,1062,1072,538,563,520,623,588,556,631,563,559,535,654,630,604,616,605,588,668,619,596,706,707,694,690,616,614,644,565,562,543,660,641,587,868,710,774,750,1284,1696,1122,819,1201,1342,939,880,781,665,483,790,379,358,559,885,633,548,722,703,578,653,488,886,742,779,709,801,741,849,588,500,591,616,893,737,591,614,635,668,439,477,371,370,838,700,581,849,730,670,836,559,635,670,465,364,409,315,629,551,492,736,630,640,729,629,912,611,796,699,821,568,437,495,629,895,514,532,550,762,748,646,614,854,507,559,361,411,307,572,529,638,621,594,650,903,447,515,542,965,725,546,792,495,1042,1081,700,624,786,1110,802,675,725,545,1230,1612,1533,1558,1556,1402,1282,1125,848,613,868,1039,1180,888,873,723,427,473,608,930,1589,1616,1588,1484,1459,1259,1060,1008,1197,895,956,1103,1101,920,792,823,944,1089,857,1180,986,818,730,713,832,985,916,728,1174,957,845,833,951,1068,937,1117,1018,933,732,833,868,1016,861,1191,936,747,1025,1076,877,681,669,683,390,375,941,787,640,742,754,631,588,636,626,937,722,1223,695,752,967,971,700,865,1178,899,597,581,593,581,554,584,616,644,673,624,1041,976,790,689,632,670,547,613,1166,972,884,768,1192,777,911,1161,753,882,739,811,714,622,920,642,602,635,728,547,1029,618,608,640,877,782,1029,931,769,890,648,375,468,698,1661,1628,1683,1469,1452,12
24,1660,1679,1653,1485,1464,1213,1051,586,887,712,1076,1051,621,649,728,556,696,559,593,564,552,577,551,554,587,545,559,531,560,544,565,549,527,506,576,550,690,596,588,600,583,590,569,543,568,543,541,539,560,539,523,538,537,536,531,540,538,516,518,522,536,550,514,516,520,535,525,520,530,511,512,511,509,517,517,510,518,520,507,526,505,513,503,502,739,853,773,887,1164,786,767,893,634,733,762,857,612,669,654,600,790,820,830,622,604,861,574,698,524,785,820,1172,790,868,903,853,875,767,807,851,793,892,922,798,799,890,859,904,841,887,819,883,894,869,812,847,902,854,911,911,838,848,855,826,870,806,864,801,817,737,871,848,806,815,896,846,841,841,914,792,995,897,705,815,832,804,846,872,904,883,834,902,873,910,880,868,822,815,872,897,798,913,800,862,819,789,764,900,791,895,803,799,850,747,809,812,858,823,721,780,834,824,838,828,812,800,841,886,906,950,810,871,899,833,907,842,860,870,895,894,815,842,782,760,915,806,730,736,846,824,867,856,782,812,806,781,796,830,919,917,734,792,832,833,885,804,769,859,878,929,935,892,872,825,930,875,849,760,827,951,1136,866,913,884,839,871,856,813,905,806,895,775,917,812,872,930,818,821,882,817,910,885,869,884,807,834,775,857,918,917,880,815,828,784,844,871,866,855,828,882,865,817,877,918,891,882,869,884,880,854,900,872,804,920,896,835,863,870,887,859,824,885,845,858,822,902,837,851,892,902,819,853,860,811,827,801,820,850,831,819,816,802,828,825,874,826,819,871,918,857,859,825,862,859,801,830,845,861,852,841,874,871,830,842,855,881,945,887,925,889,899,878,859,879,836,865,869,867,840,828,871,825,906,923,829,905,849,862,859,901,837,886,877,832,857,826,831,849,825,898,812,936,1089,944,959,781,1125,822,826,690,782,819,838,756,660,825,668,513,505,1045,867,639,755,787,794,725,740,815,744,626,792,863,1089,786,835,672,771,775,858,686,845,817,812,684,630,874,545,638,637,1149,689,701,793,594,919,1252,580,583,591,591,591,568,552,559,585,555,612,805,600,630,634,780,645,659,678,585,540,761,385,503,667,555,682,714,756,908,1106,650,636,590,584,613,602,696,1148,755,915,855,895,1018,779,707,725,667,1076,817,719,836,578,731,604,606,587,592,561,541,624,562,574,541,550,560,551,545,547,541,609,557,677,594,592,590,581,571,558,559,555,557,563,543,553,552,531,545,529,540,549,527,543,521,534,530,529,522,525,524,529,524,518,535,524,517,520,513,535,514,515,521,511,523,507,516,515,513,742,949,804,1136,875,927,1114,1210,733,569,715,820,445,528,563,696,876,645,626,532,688,817,490,949,1406,942,911,627,891,924,698,910,798,823,565,554,558,476,520,685,691,719,602,936,652,752,1083,1186,926,895,830,843,1026,685,791,803,536,1040,982,790,785,600,632,539,567,1422,975,1364,936,781,1246,1040,981,884,771,880,845,1430,1166,1036,1067,1124,1152,1111,790,1220,1354,1388,1867,1359,1416,1652,1580,1580,1515,1097,835,953,965,1415,1136,1379,1220,1229,1181,1055,1083,1088,1149,1131,1280,1183,1253,1172,1494,1199,1332,889,1416,1292,1369,1252,1419,1398,1258,1199,947,1154,1209,1052,1204,1454,914,977,899,1036,1254,1377,1187,1379,1350,1327,1159,1384,1263,1505,1327,1255,1296,1338,1388,916,1239,1218,1214,980,1244,1441,1337,1438,1099,1461,1163,1323,1304,1239,974,1251,1201,1301,1139,1380,999,1220,1303,1099,1176,1415,1321,1292,1222,919,1322,1180,1164,1312,1260,1167,1030,1185,1299,1356,1078,1074,1452,1010,1161,1350,1193,1394,1264,1212,1239,1322,1224,1567,1618,1111,1300,1124,1267,1085,973,1295,1426,1102,923,1177,1203,1120,1199,1131,1231,1171,853,1168,795,1261,1044,1189,1033,828,568,1263,1020,1106,754,687,1135,791,1182,945,1091,1011,1335,1029,732,937,820,972,921,983,915,959,973,1132,1003,1045,1042,1055,1053,1046,706,1133,816,980,886,8
/* [generated payload elided] These lines carried two machine-written arrays for
   the LZ4-packaged "test.data" bundle: the tail of a per-chunk compressed-size
   list, and a "successes" list of 0/1 flags recording which fixed-size chunks
   were actually stored in compressed form. The raw numbers are not
   human-readable content, so they are elided here; the loader code that
   consumes them continues below. */
]};
compressedData["data"]=byteArray;
assert(typeof Module.LZ4==="object","LZ4 not present - was your app built with -s LZ4=1 ?");
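/* Editorial aside, not part of the generated loader: the loadPackage call below
   receives a `files` manifest in which every entry maps a filename to a
   half-open byte range [start, end) inside the single decompressed package
   payload. A minimal sketch of that slicing step, assuming the payload is a
   plain ArrayBuffer; `unpackManifest`, `writeFile`, and the names in the usage
   comment are hypothetical stand-ins for illustration, not Emscripten APIs. */
function unpackManifest(payloadBuffer, files, writeFile) {
  for (const { filename, start, end } of files) {
    // Each packaged file is a contiguous slice of the shared payload buffer.
    writeFile(filename, new Uint8Array(payloadBuffer, start, end - start));
  }
}
// Usage (hypothetical): unpackManifest(decompressedPayload, manifest.files, fsWrite);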
?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_build/test.data")}Module["addRunDependency"]("datafile_build/test.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/python3.9/test/Sine-1000Hz-300ms.aif",start:0,end:61696,audio:0},{filename:"/lib/python3.9/test/__init__.py",start:61696,end:61743,audio:0},{filename:"/lib/python3.9/test/__main__.py",start:61743,end:61784,audio:0},{filename:"/lib/python3.9/test/_test_multiprocessing.py",start:61784,end:251736,audio:0},{filename:"/lib/python3.9/test/allsans.pem",start:251736,end:261841,audio:0},{filename:"/lib/python3.9/test/ann_module.py",start:261841,end:262945,audio:0},{filename:"/lib/python3.9/test/ann_module2.py",start:262945,end:263464,audio:0},{filename:"/lib/python3.9/test/ann_module3.py",start:263464,end:263912,audio:0},{filename:"/lib/python3.9/test/audiotest.au",start:263912,end:292056,audio:0},{filename:"/lib/python3.9/test/audiotests.py",start:292056,end:304447,audio:0},{filename:"/lib/python3.9/test/audit-tests.py",start:304447,end:313557,audio:0},{filename:"/lib/python3.9/test/autotest.py",start:313557,end:313766,audio:0},{filename:"/lib/python3.9/test/bad_coding.py",start:313766,end:313790,audio:0},{filename:"/lib/python3.9/test/bad_coding2.py",start:313790,end:313820,audio:0},{filename:"/lib/python3.9/test/bad_getattr.py",start:313820,end:313881,audio:0},{filename:"/lib/python3.9/test/bad_getattr2.py",start:313881,end:313958,audio:0},{filename:"/lib/python3.9/test/bad_getattr3.py",start:313958,end:314097,audio:0},{filename:"/lib/python3.9/test/badcert.pem",start:314097,end:316025,audio:0},{filename:"/lib/python3.9/test/badkey.pem",start:316025,end:318187,audio:0},{filename:"/lib/python3.9/test/badsyntax_3131.py",start:318187,end:318219,audio:0},{filename:"/lib/python3.9/test/badsyntax_future10.py",start:318219,end:318314,audio:0},{filename:"/lib/python3.9/test/badsyntax_future3.py",start:318314,end:318486,audio:0},{filename:"/lib/python3.9/test/badsyntax_future4.py",start:318486,end:318639,audio:0},{filename:"/lib/python3.9/test/badsyntax_future5.py",start:318639,end:318823,audio:0},{filename:"/lib/python3.9/test/badsyntax_future6.py",start:318823,end:318984,audio:0},{filename:"/lib/python3.9/test/badsyntax_future7.py",start:318984,end:319180,audio:0},{filename:"/lib/python3.9/test/badsyntax_future8.py",start:319180,end:319302,audio:0},{filename:"/lib/python3.9/test/badsyntax_future9.py",start:319302,end:319444,audio:0},{filename:"/lib/python3.9/test/badsyntax_pep3120.py",start:319444,end:319458,audio:0},{filename:"/lib/python3.9/test/bisect_cmd.py",start:319458,end:324797,audio:0},{filename:"/lib/python3.9/test/cfgparser.1",start:324797,end:324864,audio:0},{filename:"/lib/python3.9/test/cfgparser.2",start:324864,end:344336,audio:0},{filename:"/lib/python3.9/test/cfgparser.3",start:344336,end:345923,audio:0},{filename:"/lib/python3.9/test/clinic.test",start:345923,end:442834,audio:0},{filename:"/lib/python3.9/test/cmath_testcases.txt",start:442834,end:587266,audio:0},{filename:"/lib/python3.9/test/coding20731.py",start:587266,end:587288,audio:0},{filename:"/lib/python3.9/test/curses_tests.py",start:587288,end:588530,audio:0},{filename:"/lib/python3.9/te
st/dataclass_module_1.py",start:588530,end:589367,audio:0},{filename:"/lib/python3.9/test/dataclass_module_1_str.py",start:589367,end:590202,audio:0},{filename:"/lib/python3.9/test/dataclass_module_2.py",start:590202,end:590958,audio:0},{filename:"/lib/python3.9/test/dataclass_module_2_str.py",start:590958,end:591712,audio:0},{filename:"/lib/python3.9/test/dataclass_textanno.py",start:591712,end:591838,audio:0},{filename:"/lib/python3.9/test/datetimetester.py",start:591838,end:840940,audio:0},{filename:"/lib/python3.9/test/dis_module.py",start:840940,end:841016,audio:0},{filename:"/lib/python3.9/test/doctest_aliases.py",start:841016,end:841256,audio:0},{filename:"/lib/python3.9/test/double_const.py",start:841256,end:842468,audio:0},{filename:"/lib/python3.9/test/empty.vbs",start:842468,end:842538,audio:0},{filename:"/lib/python3.9/test/exception_hierarchy.txt",start:842538,end:844360,audio:0},{filename:"/lib/python3.9/test/ffdh3072.pem",start:844360,end:846572,audio:0},{filename:"/lib/python3.9/test/final_a.py",start:846572,end:846983,audio:0},{filename:"/lib/python3.9/test/final_b.py",start:846983,end:847394,audio:0},{filename:"/lib/python3.9/test/floating_points.txt",start:847394,end:863696,audio:0},{filename:"/lib/python3.9/test/fork_wait.py",start:863696,end:865922,audio:0},{filename:"/lib/python3.9/test/formatfloat_testcases.txt",start:865922,end:873552,audio:0},{filename:"/lib/python3.9/test/future_test1.py",start:873552,end:873781,audio:0},{filename:"/lib/python3.9/test/future_test2.py",start:873781,end:873930,audio:0},{filename:"/lib/python3.9/test/gdb_sample.py",start:873930,end:874083,audio:0},{filename:"/lib/python3.9/test/good_getattr.py",start:874083,end:874281,audio:0},{filename:"/lib/python3.9/test/idnsans.pem",start:874281,end:884227,audio:0},{filename:"/lib/python3.9/test/ieee754.txt",start:884227,end:887510,audio:0},{filename:"/lib/python3.9/test/imp_dummy.py",start:887510,end:887573,audio:0},{filename:"/lib/python3.9/test/inspect_fodder.py",start:887573,end:889500,audio:0},{filename:"/lib/python3.9/test/inspect_fodder2.py",start:889500,end:892962,audio:0},{filename:"/lib/python3.9/test/keycert.passwd.pem",start:892962,end:897187,audio:0},{filename:"/lib/python3.9/test/keycert.pem",start:897187,end:901245,audio:0},{filename:"/lib/python3.9/test/keycert2.pem",start:901245,end:905323,audio:0},{filename:"/lib/python3.9/test/keycert3.pem",start:905323,end:914771,audio:0},{filename:"/lib/python3.9/test/keycert4.pem",start:914771,end:924233,audio:0},{filename:"/lib/python3.9/test/keycertecc.pem",start:924233,end:929870,audio:0},{filename:"/lib/python3.9/test/list_tests.py",start:929870,end:947331,audio:0},{filename:"/lib/python3.9/test/lock_tests.py",start:947331,end:978072,audio:0},{filename:"/lib/python3.9/test/mailcap.txt",start:978072,end:979342,audio:0},{filename:"/lib/python3.9/test/make_ssl_certs.py",start:979342,end:988810,audio:0},{filename:"/lib/python3.9/test/mapping_tests.py",start:988810,end:1011169,audio:0},{filename:"/lib/python3.9/test/math_testcases.txt",start:1011169,end:1034911,audio:0},{filename:"/lib/python3.9/test/memory_watchdog.py",start:1034911,end:1035770,audio:0},{filename:"/lib/python3.9/test/mime.types",start:1035770,end:1084279,audio:0},{filename:"/lib/python3.9/test/mock_socket.py",start:1084279,end:1088070,audio:0},{filename:"/lib/python3.9/test/mod_generics_cache.py",start:1088070,end:1089230,audio:0},{filename:"/lib/python3.9/test/mp_fork_bomb.py",start:1089230,end:1089678,audio:0},{filename:"/lib/python3.9/test/mp_preload.py",start:1089678,end:
1090029,audio:0},{filename:"/lib/python3.9/test/multibytecodec_support.py",start:1090029,end:1104538,audio:0},{filename:"/lib/python3.9/test/nokia.pem",start:1104538,end:1106461,audio:0},{filename:"/lib/python3.9/test/nosan.pem",start:1106461,end:1114180,audio:0},{filename:"/lib/python3.9/test/nullbytecert.pem",start:1114180,end:1119615,audio:0},{filename:"/lib/python3.9/test/nullcert.pem",start:1119615,end:1119615,audio:0},{filename:"/lib/python3.9/test/pickletester.py",start:1119615,end:1259572,audio:0},{filename:"/lib/python3.9/test/profilee.py",start:1259572,end:1262613,audio:0},{filename:"/lib/python3.9/test/pstats.pck",start:1262613,end:1329220,audio:0},{filename:"/lib/python3.9/test/pycacert.pem",start:1329220,end:1334884,audio:0},{filename:"/lib/python3.9/test/pycakey.pem",start:1334884,end:1337368,audio:0},{filename:"/lib/python3.9/test/pyclbr_input.py",start:1337368,end:1338016,audio:0},{filename:"/lib/python3.9/test/pydoc_mod.py",start:1338016,end:1338729,audio:0},{filename:"/lib/python3.9/test/pydocfodder.py",start:1338729,end:1345061,audio:0},{filename:"/lib/python3.9/test/pythoninfo.py",start:1345061,end:1366324,audio:0},{filename:"/lib/python3.9/test/randv2_32.pck",start:1366324,end:1373841,audio:0},{filename:"/lib/python3.9/test/randv2_64.pck",start:1373841,end:1381206,audio:0},{filename:"/lib/python3.9/test/randv3.pck",start:1381206,end:1389210,audio:0},{filename:"/lib/python3.9/test/re_tests.py",start:1389210,end:1415762,audio:0},{filename:"/lib/python3.9/test/recursion.tar",start:1415762,end:1416278,audio:0},{filename:"/lib/python3.9/test/regrtest.py",start:1416278,end:1417573,audio:0},{filename:"/lib/python3.9/test/relimport.py",start:1417573,end:1417600,audio:0},{filename:"/lib/python3.9/test/reperf.py",start:1417600,end:1418138,audio:0},{filename:"/lib/python3.9/test/revocation.crl",start:1418138,end:1418938,audio:0},{filename:"/lib/python3.9/test/sample_doctest.py",start:1418938,end:1419979,audio:0},{filename:"/lib/python3.9/test/sample_doctest_no_docstrings.py",start:1419979,end:1420206,audio:0},{filename:"/lib/python3.9/test/sample_doctest_no_doctests.py",start:1420206,end:1420475,audio:0},{filename:"/lib/python3.9/test/secp384r1.pem",start:1420475,end:1420731,audio:0},{filename:"/lib/python3.9/test/selfsigned_pythontestdotnet.pem",start:1420731,end:1422861,audio:0},{filename:"/lib/python3.9/test/seq_tests.py",start:1422861,end:1438087,audio:0},{filename:"/lib/python3.9/test/sgml_input.html",start:1438087,end:1446381,audio:0},{filename:"/lib/python3.9/test/signalinterproctester.py",start:1446381,end:1449184,audio:0},{filename:"/lib/python3.9/test/sortperf.py",start:1449184,end:1453990,audio:0},{filename:"/lib/python3.9/test/ssl_cert.pem",start:1453990,end:1455560,audio:0},{filename:"/lib/python3.9/test/ssl_key.passwd.pem",start:1455560,end:1458214,audio:0},{filename:"/lib/python3.9/test/ssl_key.pem",start:1458214,end:1460702,audio:0},{filename:"/lib/python3.9/test/ssl_servers.py",start:1460702,end:1467981,audio:0},{filename:"/lib/python3.9/test/ssltests.py",start:1467981,end:1469032,audio:0},{filename:"/lib/python3.9/test/string_tests.py",start:1469032,end:1537176,audio:0},{filename:"/lib/python3.9/test/talos-2019-0758.pem",start:1537176,end:1538506,audio:0},{filename:"/lib/python3.9/test/test___all__.py",start:1538506,end:1542785,audio:0},{filename:"/lib/python3.9/test/test___future__.py",start:1542785,end:1545206,audio:0},{filename:"/lib/python3.9/test/test__locale.py",start:1545206,end:1553225,audio:0},{filename:"/lib/python3.9/test/test__opcode.py",start:1553225
,end:1556332,audio:0},{filename:"/lib/python3.9/test/test__osx_support.py",start:1556332,end:1570315,audio:0},{filename:"/lib/python3.9/test/test__xxsubinterpreters.py",start:1570315,end:1650609,audio:0},{filename:"/lib/python3.9/test/test_abc.py",start:1650609,end:1669791,audio:0},{filename:"/lib/python3.9/test/test_abstract_numbers.py",start:1669791,end:1671319,audio:0},{filename:"/lib/python3.9/test/test_aifc.py",start:1671319,end:1689442,audio:0},{filename:"/lib/python3.9/test/test_argparse.py",start:1689442,end:1868538,audio:0},{filename:"/lib/python3.9/test/test_array.py",start:1868538,end:1921395,audio:0},{filename:"/lib/python3.9/test/test_asdl_parser.py",start:1921395,end:1925635,audio:0},{filename:"/lib/python3.9/test/test_ast.py",start:1925635,end:2025803,audio:0},{filename:"/lib/python3.9/test/test_asyncgen.py",start:2025803,end:2058002,audio:0},{filename:"/lib/python3.9/test/test_asynchat.py",start:2058002,end:2067382,audio:0},{filename:"/lib/python3.9/test/test_asyncore.py",start:2067382,end:2093842,audio:0},{filename:"/lib/python3.9/test/test_atexit.py",start:2093842,end:2099793,audio:0},{filename:"/lib/python3.9/test/test_audioop.py",start:2099793,end:2128707,audio:0},{filename:"/lib/python3.9/test/test_audit.py",start:2128707,end:2132850,audio:0},{filename:"/lib/python3.9/test/test_augassign.py",start:2132850,end:2140718,audio:0},{filename:"/lib/python3.9/test/test_base64.py",start:2140718,end:2171319,audio:0},{filename:"/lib/python3.9/test/test_baseexception.py",start:2171319,end:2178348,audio:0},{filename:"/lib/python3.9/test/test_bdb.py",start:2178348,end:2220893,audio:0},{filename:"/lib/python3.9/test/test_bigaddrspace.py",start:2220893,end:2223850,audio:0},{filename:"/lib/python3.9/test/test_bigmem.py",start:2223850,end:2269810,audio:0},{filename:"/lib/python3.9/test/test_binascii.py",start:2269810,end:2289140,audio:0},{filename:"/lib/python3.9/test/test_binhex.py",start:2289140,end:2291149,audio:0},{filename:"/lib/python3.9/test/test_binop.py",start:2291149,end:2305628,audio:0},{filename:"/lib/python3.9/test/test_bisect.py",start:2305628,end:2319588,audio:0},{filename:"/lib/python3.9/test/test_bool.py",start:2319588,end:2332372,audio:0},{filename:"/lib/python3.9/test/test_buffer.py",start:2332372,end:2496491,audio:0},{filename:"/lib/python3.9/test/test_bufio.py",start:2496491,end:2499088,audio:0},{filename:"/lib/python3.9/test/test_builtin.py",start:2499088,end:2581389,audio:0},{filename:"/lib/python3.9/test/test_bytes.py",start:2581389,end:2657829,audio:0},{filename:"/lib/python3.9/test/test_bz2.py",start:2657829,end:2695544,audio:0},{filename:"/lib/python3.9/test/test_c_locale_coercion.py",start:2695544,end:2715004,audio:0},{filename:"/lib/python3.9/test/test_calendar.py",start:2715004,end:2764888,audio:0},{filename:"/lib/python3.9/test/test_call.py",start:2764888,end:2789558,audio:0},{filename:"/lib/python3.9/test/test_capi.py",start:2789558,end:2825223,audio:0},{filename:"/lib/python3.9/test/test_cgi.py",start:2825223,end:2847958,audio:0},{filename:"/lib/python3.9/test/test_cgitb.py",start:2847958,end:2850550,audio:0},{filename:"/lib/python3.9/test/test_charmapcodec.py",start:2850550,end:2852268,audio:0},{filename:"/lib/python3.9/test/test_check_c_globals.py",start:2852268,end:2853017,audio:0},{filename:"/lib/python3.9/test/test_class.py",start:2853017,end:2870848,audio:0},{filename:"/lib/python3.9/test/test_clinic.py",start:2870848,end:2892873,audio:0},{filename:"/lib/python3.9/test/test_cmath.py",start:2892873,end:2917517,audio:0},{filename:"/lib/python3.9/test/
test_cmd.py",start:2917517,end:2923766,audio:0},{filename:"/lib/python3.9/test/test_cmd_line.py",start:2923766,end:2958928,audio:0},{filename:"/lib/python3.9/test/test_cmd_line_script.py",start:2958928,end:2992208,audio:0},{filename:"/lib/python3.9/test/test_code.py",start:2992208,end:3005118,audio:0},{filename:"/lib/python3.9/test/test_code_module.py",start:3005118,end:3010764,audio:0},{filename:"/lib/python3.9/test/test_codeccallbacks.py",start:3010764,end:3052714,audio:0},{filename:"/lib/python3.9/test/test_codecencodings_cn.py",start:3052714,end:3056664,audio:0},{filename:"/lib/python3.9/test/test_codecencodings_hk.py",start:3056664,end:3057365,audio:0},{filename:"/lib/python3.9/test/test_codecencodings_iso2022.py",start:3057365,end:3058755,audio:0},{filename:"/lib/python3.9/test/test_codecencodings_jp.py",start:3058755,end:3063662,audio:0},{filename:"/lib/python3.9/test/test_codecencodings_kr.py",start:3063662,end:3066690,audio:0},{filename:"/lib/python3.9/test/test_codecencodings_tw.py",start:3066690,end:3067371,audio:0},{filename:"/lib/python3.9/test/test_codecmaps_cn.py",start:3067371,end:3068117,audio:0},{filename:"/lib/python3.9/test/test_codecmaps_hk.py",start:3068117,end:3068503,audio:0},{filename:"/lib/python3.9/test/test_codecmaps_jp.py",start:3068503,end:3070247,audio:0},{filename:"/lib/python3.9/test/test_codecmaps_kr.py",start:3070247,end:3071435,audio:0},{filename:"/lib/python3.9/test/test_codecmaps_tw.py",start:3071435,end:3072140,audio:0},{filename:"/lib/python3.9/test/test_codecs.py",start:3072140,end:3204614,audio:0},{filename:"/lib/python3.9/test/test_codeop.py",start:3204614,end:3213078,audio:0},{filename:"/lib/python3.9/test/test_collections.py",start:3213078,end:3304829,audio:0},{filename:"/lib/python3.9/test/test_colorsys.py",start:3304829,end:3308756,audio:0},{filename:"/lib/python3.9/test/test_compare.py",start:3308756,end:3312584,audio:0},{filename:"/lib/python3.9/test/test_compile.py",start:3312584,end:3350488,audio:0},{filename:"/lib/python3.9/test/test_compileall.py",start:3350488,end:3396350,audio:0},{filename:"/lib/python3.9/test/test_complex.py",start:3396350,end:3427316,audio:0},{filename:"/lib/python3.9/test/test_concurrent_futures.py",start:3427316,end:3480887,audio:0},{filename:"/lib/python3.9/test/test_configparser.py",start:3480887,end:3567851,audio:0},{filename:"/lib/python3.9/test/test_contains.py",start:3567851,end:3571283,audio:0},{filename:"/lib/python3.9/test/test_context.py",start:3571283,end:3601194,audio:0},{filename:"/lib/python3.9/test/test_contextlib.py",start:3601194,end:3634492,audio:0},{filename:"/lib/python3.9/test/test_contextlib_async.py",start:3634492,end:3649643,audio:0},{filename:"/lib/python3.9/test/test_copy.py",start:3649643,end:3676075,audio:0},{filename:"/lib/python3.9/test/test_copyreg.py",start:3676075,end:3680573,audio:0},{filename:"/lib/python3.9/test/test_coroutines.py",start:3680573,end:3744758,audio:0},{filename:"/lib/python3.9/test/test_cprofile.py",start:3744758,end:3751235,audio:0},{filename:"/lib/python3.9/test/test_crashers.py",start:3751235,end:3752432,audio:0},{filename:"/lib/python3.9/test/test_crypt.py",start:3752432,end:3756495,audio:0},{filename:"/lib/python3.9/test/test_csv.py",start:3756495,end:3805416,audio:0},{filename:"/lib/python3.9/test/test_ctypes.py",start:3805416,end:3805600,audio:0},{filename:"/lib/python3.9/test/test_curses.py",start:3805600,end:3852712,audio:0},{filename:"/lib/python3.9/test/test_dataclasses.py",start:3852712,end:3963858,audio:0},{filename:"/lib/python3.9/test/test_datetime.py
",start:3963858,end:3966059,audio:0},{filename:"/lib/python3.9/test/test_dbm.py",start:3966059,end:3972661,audio:0},{filename:"/lib/python3.9/test/test_dbm_dumb.py",start:3972661,end:3983529,audio:0},{filename:"/lib/python3.9/test/test_dbm_gnu.py",start:3983529,end:3989895,audio:0},{filename:"/lib/python3.9/test/test_dbm_ndbm.py",start:3989895,end:3995070,audio:0},{filename:"/lib/python3.9/test/test_decimal.py",start:3995070,end:4208516,audio:0},{filename:"/lib/python3.9/test/test_decorators.py",start:4208516,end:4219700,audio:0},{filename:"/lib/python3.9/test/test_defaultdict.py",start:4219700,end:4227042,audio:0},{filename:"/lib/python3.9/test/test_deque.py",start:4227042,end:4262328,audio:0},{filename:"/lib/python3.9/test/test_descr.py",start:4262328,end:4457854,audio:0},{filename:"/lib/python3.9/test/test_descrtut.py",start:4457854,end:4469684,audio:0},{filename:"/lib/python3.9/test/test_devpoll.py",start:4469684,end:4474292,audio:0},{filename:"/lib/python3.9/test/test_dict.py",start:4474292,end:4521658,audio:0},{filename:"/lib/python3.9/test/test_dict_version.py",start:4521658,end:4527881,audio:0},{filename:"/lib/python3.9/test/test_dictcomps.py",start:4527881,end:4533153,audio:0},{filename:"/lib/python3.9/test/test_dictviews.py",start:4533153,end:4546964,audio:0},{filename:"/lib/python3.9/test/test_difflib.py",start:4546964,end:4569040,audio:0},{filename:"/lib/python3.9/test/test_difflib_expect.html",start:4569040,end:4672306,audio:0},{filename:"/lib/python3.9/test/test_dis.py",start:4672306,end:4725633,audio:0},{filename:"/lib/python3.9/test/test_distutils.py",start:4725633,end:4726116,audio:0},{filename:"/lib/python3.9/test/test_doctest.py",start:4726116,end:4827156,audio:0},{filename:"/lib/python3.9/test/test_doctest.txt",start:4827156,end:4827456,audio:0},{filename:"/lib/python3.9/test/test_doctest2.py",start:4827456,end:4829815,audio:0},{filename:"/lib/python3.9/test/test_doctest2.txt",start:4829815,end:4830207,audio:0},{filename:"/lib/python3.9/test/test_doctest3.txt",start:4830207,end:4830289,audio:0},{filename:"/lib/python3.9/test/test_doctest4.txt",start:4830289,end:4830533,audio:0},{filename:"/lib/python3.9/test/test_docxmlrpc.py",start:4830533,end:4839414,audio:0},{filename:"/lib/python3.9/test/test_dtrace.py",start:4839414,end:4844770,audio:0},{filename:"/lib/python3.9/test/test_enum.py",start:4844770,end:4963345,audio:0},{filename:"/lib/python3.9/test/test_dynamic.py",start:4963345,end:4967739,audio:0},{filename:"/lib/python3.9/test/test_dynamicclassattribute.py",start:4967739,end:4977534,audio:0},{filename:"/lib/python3.9/test/test_eintr.py",start:4977534,end:4978887,audio:0},{filename:"/lib/python3.9/test/test_embed.py",start:4978887,end:5031311,audio:0},{filename:"/lib/python3.9/test/test_ensurepip.py",start:5031311,end:5041375,audio:0},{filename:"/lib/python3.9/test/test_enumerate.py",start:5041375,end:5050041,audio:0},{filename:"/lib/python3.9/test/test_eof.py",start:5050041,end:5052531,audio:0},{filename:"/lib/python3.9/test/test_epoll.py",start:5052531,end:5061888,audio:0},{filename:"/lib/python3.9/test/test_errno.py",start:5061888,end:5062957,audio:0},{filename:"/lib/python3.9/test/test_exception_hierarchy.py",start:5062957,end:5070567,audio:0},{filename:"/lib/python3.9/test/test_exception_variations.py",start:5070567,end:5074515,audio:0},{filename:"/lib/python3.9/test/test_exceptions.py",start:5074515,end:5126368,audio:0},{filename:"/lib/python3.9/test/test_extcall.py",start:5126368,end:5140753,audio:0},{filename:"/lib/python3.9/test/test_faulthandler.py",start:5
140753,end:5169967,audio:0},{filename:"/lib/python3.9/test/test_fcntl.py",start:5169967,end:5176789,audio:0},{filename:"/lib/python3.9/test/test_file.py",start:5176789,end:5188070,audio:0},{filename:"/lib/python3.9/test/test_file_eintr.py",start:5188070,end:5198924,audio:0},{filename:"/lib/python3.9/test/test_filecmp.py",start:5198924,end:5207818,audio:0},{filename:"/lib/python3.9/test/test_fileinput.py",start:5207818,end:5246049,audio:0},{filename:"/lib/python3.9/test/test_fileio.py",start:5246049,end:5266493,audio:0},{filename:"/lib/python3.9/test/test_finalization.py",start:5266493,end:5281502,audio:0},{filename:"/lib/python3.9/test/test_float.py",start:5281502,end:5347881,audio:0},{filename:"/lib/python3.9/test/test_flufl.py",start:5347881,end:5349542,audio:0},{filename:"/lib/python3.9/test/test_fnmatch.py",start:5349542,end:5356476,audio:0},{filename:"/lib/python3.9/test/test_fork1.py",start:5356476,end:5359792,audio:0},{filename:"/lib/python3.9/test/test_format.py",start:5359792,end:5384268,audio:0},{filename:"/lib/python3.9/test/test_fractions.py",start:5384268,end:5414569,audio:0},{filename:"/lib/python3.9/test/test_frame.py",start:5414569,end:5420380,audio:0},{filename:"/lib/python3.9/test/test_frozen.py",start:5420380,end:5421323,audio:0},{filename:"/lib/python3.9/test/test_fstring.py",start:5421323,end:5472841,audio:0},{filename:"/lib/python3.9/test/test_ftplib.py",start:5472841,end:5515486,audio:0},{filename:"/lib/python3.9/test/test_funcattrs.py",start:5515486,end:5529356,audio:0},{filename:"/lib/python3.9/test/test_functools.py",start:5529356,end:5623914,audio:0},{filename:"/lib/python3.9/test/test_future.py",start:5623914,end:5637199,audio:0},{filename:"/lib/python3.9/test/test_future3.py",start:5637199,end:5637689,audio:0},{filename:"/lib/python3.9/test/test_future4.py",start:5637689,end:5637911,audio:0},{filename:"/lib/python3.9/test/test_future5.py",start:5637911,end:5638421,audio:0},{filename:"/lib/python3.9/test/test_gc.py",start:5638421,end:5684371,audio:0},{filename:"/lib/python3.9/test/test_gdb.py",start:5684371,end:5726650,audio:0},{filename:"/lib/python3.9/test/test_generator_stop.py",start:5726650,end:5727593,audio:0},{filename:"/lib/python3.9/test/test_generators.py",start:5727593,end:5791821,audio:0},{filename:"/lib/python3.9/test/test_genericalias.py",start:5791821,end:5806314,audio:0},{filename:"/lib/python3.9/test/test_genericclass.py",start:5806314,end:5815819,audio:0},{filename:"/lib/python3.9/test/test_genericpath.py",start:5815819,end:5838032,audio:0},{filename:"/lib/python3.9/test/test_genexps.py",start:5838032,end:5845799,audio:0},{filename:"/lib/python3.9/test/test_getargs2.py",start:5845799,end:5896996,audio:0},{filename:"/lib/python3.9/test/test_getopt.py",start:5896996,end:5903906,audio:0},{filename:"/lib/python3.9/test/test_getpass.py",start:5903906,end:5910343,audio:0},{filename:"/lib/python3.9/test/test_gettext.py",start:5910343,end:5952717,audio:0},{filename:"/lib/python3.9/test/test_glob.py",start:5952717,end:5966062,audio:0},{filename:"/lib/python3.9/test/test_global.py",start:5966062,end:5967402,audio:0},{filename:"/lib/python3.9/test/test_grammar.py",start:5967402,end:6029470,audio:0},{filename:"/lib/python3.9/test/test_graphlib.py",start:6029470,end:6037674,audio:0},{filename:"/lib/python3.9/test/test_grp.py",start:6037674,end:6041302,audio:0},{filename:"/lib/python3.9/test/test_gzip.py",start:6041302,end:6072400,audio:0},{filename:"/lib/python3.9/test/test_hash.py",start:6072400,end:6084122,audio:0},{filename:"/lib/python3.9/test/test_hashl
ib.py",start:6084122,end:6127821,audio:0},{filename:"/lib/python3.9/test/test_heapq.py",start:6127821,end:6144613,audio:0},{filename:"/lib/python3.9/test/test_hmac.py",start:6144613,end:6170153,audio:0},{filename:"/lib/python3.9/test/test_html.py",start:6170153,end:6174489,audio:0},{filename:"/lib/python3.9/test/test_htmlparser.py",start:6174489,end:6208211,audio:0},{filename:"/lib/python3.9/test/test_http_cookiejar.py",start:6208211,end:6288449,audio:0},{filename:"/lib/python3.9/test/test_http_cookies.py",start:6288449,end:6307603,audio:0},{filename:"/lib/python3.9/test/test_httplib.py",start:6307603,end:6386980,audio:0},{filename:"/lib/python3.9/test/test_httpservers.py",start:6386980,end:6438700,audio:0},{filename:"/lib/python3.9/test/test_idle.py",start:6438700,end:6439525,audio:0},{filename:"/lib/python3.9/test/test_imaplib.py",start:6439525,end:6481567,audio:0},{filename:"/lib/python3.9/test/test_imghdr.py",start:6481567,end:6486334,audio:0},{filename:"/lib/python3.9/test/test_imp.py",start:6486334,end:6504493,audio:0},{filename:"/lib/python3.9/test/test_index.py",start:6504493,end:6513065,audio:0},{filename:"/lib/python3.9/test/test_inspect.py",start:6513065,end:6670719,audio:0},{filename:"/lib/python3.9/test/test_int.py",start:6670719,end:6692019,audio:0},{filename:"/lib/python3.9/test/test_int_literal.py",start:6692019,end:6699072,audio:0},{filename:"/lib/python3.9/test/test_io.py",start:6699072,end:6868703,audio:0},{filename:"/lib/python3.9/test/test_ioctl.py",start:6868703,end:6871983,audio:0},{filename:"/lib/python3.9/test/test_ipaddress.py",start:6871983,end:6990234,audio:0},{filename:"/lib/python3.9/test/test_isinstance.py",start:6990234,end:7000735,audio:0},{filename:"/lib/python3.9/test/test_iter.py",start:7000735,end:7034025,audio:0},{filename:"/lib/python3.9/test/test_iterlen.py",start:7034025,end:7041291,audio:0},{filename:"/lib/python3.9/test/test_itertools.py",start:7041291,end:7145197,audio:0},{filename:"/lib/python3.9/test/test_keyword.py",start:7145197,end:7146622,audio:0},{filename:"/lib/python3.9/test/test_keywordonlyarg.py",start:7146622,end:7153639,audio:0},{filename:"/lib/python3.9/test/test_kqueue.py",start:7153639,end:7162605,audio:0},{filename:"/lib/python3.9/test/test_largefile.py",start:7162605,end:7172787,audio:0},{filename:"/lib/python3.9/test/test_lib2to3.py",start:7172787,end:7173051,audio:0},{filename:"/lib/python3.9/test/test_linecache.py",start:7173051,end:7181031,audio:0},{filename:"/lib/python3.9/test/test_list.py",start:7181031,end:7188749,audio:0},{filename:"/lib/python3.9/test/test_listcomps.py",start:7188749,end:7193016,audio:0},{filename:"/lib/python3.9/test/test_lltrace.py",start:7193016,end:7194028,audio:0},{filename:"/lib/python3.9/test/test_locale.py",start:7194028,end:7218422,audio:0},{filename:"/lib/python3.9/test/test_logging.py",start:7218422,end:7410192,audio:0},{filename:"/lib/python3.9/test/test_long.py",start:7410192,end:7464884,audio:0},{filename:"/lib/python3.9/test/test_longexp.py",start:7464884,end:7465117,audio:0},{filename:"/lib/python3.9/test/test_lzma.py",start:7465117,end:7555114,audio:0},{filename:"/lib/python3.9/test/test_mailbox.py",start:7555114,end:7649241,audio:0},{filename:"/lib/python3.9/test/test_mailcap.py",start:7649241,end:7659358,audio:0},{filename:"/lib/python3.9/test/test_marshal.py",start:7659358,end:7680423,audio:0},{filename:"/lib/python3.9/test/test_math.py",start:7680423,end:7769653,audio:0},{filename:"/lib/python3.9/test/test_memoryio.py",start:7769653,end:7801892,audio:0},{filename:"/lib/python3.9/tes
t/test_memoryview.py",start:7801892,end:7820102,audio:0},{filename:"/lib/python3.9/test/test_metaclass.py",start:7820102,end:7826463,audio:0},{filename:"/lib/python3.9/test/test_mimetypes.py",start:7826463,end:7838893,audio:0},{filename:"/lib/python3.9/test/test_minidom.py",start:7838893,end:7908189,audio:0},{filename:"/lib/python3.9/test/test_mmap.py",start:7908189,end:7939754,audio:0},{filename:"/lib/python3.9/test/test_module.py",start:7939754,end:7950217,audio:0},{filename:"/lib/python3.9/test/test_modulefinder.py",start:7950217,end:7962709,audio:0},{filename:"/lib/python3.9/test/test_msilib.py",start:7962709,end:7967857,audio:0},{filename:"/lib/python3.9/test/test_multibytecodec.py",start:7967857,end:7983255,audio:0},{filename:"/lib/python3.9/test/test_multiprocessing_fork.py",start:7983255,end:7983732,audio:0},{filename:"/lib/python3.9/test/test_multiprocessing_forkserver.py",start:7983732,end:7984124,audio:0},{filename:"/lib/python3.9/test/test_multiprocessing_main_handling.py",start:7984124,end:7995845,audio:0},{filename:"/lib/python3.9/test/test_multiprocessing_spawn.py",start:7995845,end:7996122,audio:0},{filename:"/lib/python3.9/test/test_named_expressions.py",start:7996122,end:8016733,audio:0},{filename:"/lib/python3.9/test/test_netrc.py",start:8016733,end:8022838,audio:0},{filename:"/lib/python3.9/test/test_nis.py",start:8022838,end:8023994,audio:0},{filename:"/lib/python3.9/test/test_nntplib.py",start:8023994,end:8087875,audio:0},{filename:"/lib/python3.9/test/test_ntpath.py",start:8087875,end:8123790,audio:0},{filename:"/lib/python3.9/test/test_numeric_tower.py",start:8123790,end:8131142,audio:0},{filename:"/lib/python3.9/test/test_opcodes.py",start:8131142,end:8134834,audio:0},{filename:"/lib/python3.9/test/test_openpty.py",start:8134834,end:8135434,audio:0},{filename:"/lib/python3.9/test/test_operator.py",start:8135434,end:8159543,audio:0},{filename:"/lib/python3.9/test/test_optparse.py",start:8159543,end:8222001,audio:0},{filename:"/lib/python3.9/test/test_ordered_dict.py",start:8222001,end:8254229,audio:0},{filename:"/lib/python3.9/test/test_os.py",start:8254229,end:8415138,audio:0},{filename:"/lib/python3.9/test/test_ossaudiodev.py",start:8415138,end:8422364,audio:0},{filename:"/lib/python3.9/test/test_osx_env.py",start:8422364,end:8423692,audio:0},{filename:"/lib/python3.9/test/test_parser.py",start:8423692,end:8462404,audio:0},{filename:"/lib/python3.9/test/test_pathlib.py",start:8462404,end:8568293,audio:0},{filename:"/lib/python3.9/test/test_pdb.py",start:8568293,end:8625145,audio:0},{filename:"/lib/python3.9/test/test_peepholer.py",start:8625145,end:8645848,audio:0},{filename:"/lib/python3.9/test/test_peg_parser.py",start:8645848,end:8669896,audio:0},{filename:"/lib/python3.9/test/test_pickle.py",start:8669896,end:8689520,audio:0},{filename:"/lib/python3.9/test/test_picklebuffer.py",start:8689520,end:8694597,audio:0},{filename:"/lib/python3.9/test/test_pickletools.py",start:8694597,end:8699031,audio:0},{filename:"/lib/python3.9/test/test_pipes.py",start:8699031,end:8705775,audio:0},{filename:"/lib/python3.9/test/test_pkg.py",start:8705775,end:8715599,audio:0},{filename:"/lib/python3.9/test/test_pkgutil.py",start:8715599,end:8737353,audio:0},{filename:"/lib/python3.9/test/test_platform.py",start:8737353,end:8754297,audio:0},{filename:"/lib/python3.9/test/test_plistlib.py",start:8754297,end:8793909,audio:0},{filename:"/lib/python3.9/test/test_poll.py",start:8793909,end:8801317,audio:0},{filename:"/lib/python3.9/test/test_popen.py",start:8801317,end:8803367,audio:0},{f
ilename:"/lib/python3.9/test/test_poplib.py",start:8803367,end:8821223,audio:0},{filename:"/lib/python3.9/test/test_positional_only_arg.py",start:8821223,end:8839417,audio:0},{filename:"/lib/python3.9/test/test_posix.py",start:8839417,end:8926561,audio:0},{filename:"/lib/python3.9/test/test_posixpath.py",start:8926561,end:8956439,audio:0},{filename:"/lib/python3.9/test/test_pow.py",start:8956439,end:8962009,audio:0},{filename:"/lib/python3.9/test/test_pprint.py",start:8962009,end:9008439,audio:0},{filename:"/lib/python3.9/test/test_print.py",start:9008439,end:9015986,audio:0},{filename:"/lib/python3.9/test/test_profile.py",start:9015986,end:9024916,audio:0},{filename:"/lib/python3.9/test/test_property.py",start:9024916,end:9034597,audio:0},{filename:"/lib/python3.9/test/test_pstats.py",start:9034597,end:9038240,audio:0},{filename:"/lib/python3.9/test/test_pty.py",start:9038240,end:9050522,audio:0},{filename:"/lib/python3.9/test/test_pulldom.py",start:9050522,end:9063490,audio:0},{filename:"/lib/python3.9/test/test_pwd.py",start:9063490,end:9067758,audio:0},{filename:"/lib/python3.9/test/test_py_compile.py",start:9067758,end:9078793,audio:0},{filename:"/lib/python3.9/test/test_pyclbr.py",start:9078793,end:9088917,audio:0},{filename:"/lib/python3.9/test/test_pydoc.py",start:9088917,end:9147626,audio:0},{filename:"/lib/python3.9/test/test_pyexpat.py",start:9147626,end:9174957,audio:0},{filename:"/lib/python3.9/test/test_queue.py",start:9174957,end:9195793,audio:0},{filename:"/lib/python3.9/test/test_quopri.py",start:9195793,end:9203755,audio:0},{filename:"/lib/python3.9/test/test_raise.py",start:9203755,end:9216923,audio:0},{filename:"/lib/python3.9/test/test_random.py",start:9216923,end:9269578,audio:0},{filename:"/lib/python3.9/test/test_range.py",start:9269578,end:9293563,audio:0},{filename:"/lib/python3.9/test/test_re.py",start:9293563,end:9402515,audio:0},{filename:"/lib/python3.9/test/test_readline.py",start:9402515,end:9415772,audio:0},{filename:"/lib/python3.9/test/test_regrtest.py",start:9415772,end:9464579,audio:0},{filename:"/lib/python3.9/test/test_repl.py",start:9464579,end:9468629,audio:0},{filename:"/lib/python3.9/test/test_reprlib.py",start:9468629,end:9484107,audio:0},{filename:"/lib/python3.9/test/test_resource.py",start:9484107,end:9491296,audio:0},{filename:"/lib/python3.9/test/test_richcmp.py",start:9491296,end:9503492,audio:0},{filename:"/lib/python3.9/test/test_rlcompleter.py",start:9503492,end:9509941,audio:0},{filename:"/lib/python3.9/test/test_robotparser.py",start:9509941,end:9521035,audio:0},{filename:"/lib/python3.9/test/test_runpy.py",start:9521035,end:9555703,audio:0},{filename:"/lib/python3.9/test/test_sax.py",start:9555703,end:9603841,audio:0},{filename:"/lib/python3.9/test/test_sched.py",start:9603841,end:9610387,audio:0},{filename:"/lib/python3.9/test/test_scope.py",start:9610387,end:9630564,audio:0},{filename:"/lib/python3.9/test/test_script_helper.py",start:9630564,end:9636480,audio:0},{filename:"/lib/python3.9/test/test_secrets.py",start:9636480,end:9640861,audio:0},{filename:"/lib/python3.9/test/test_select.py",start:9640861,end:9643619,audio:0},{filename:"/lib/python3.9/test/test_selectors.py",start:9643619,end:9662287,audio:0},{filename:"/lib/python3.9/test/test_set.py",start:9662287,end:9728244,audio:0},{filename:"/lib/python3.9/test/test_setcomps.py",start:9728244,end:9732490,audio:0},{filename:"/lib/python3.9/test/test_shelve.py",start:9732490,end:9738884,audio:0},{filename:"/lib/python3.9/test/test_shlex.py",start:9738884,end:9752685,audio:0},{filen
ame:"/lib/python3.9/test/test_shutil.py",start:9752685,end:9856908,audio:0},{filename:"/lib/python3.9/test/test_signal.py",start:9856908,end:9905644,audio:0},{filename:"/lib/python3.9/test/test_site.py",start:9905644,end:9931343,audio:0},{filename:"/lib/python3.9/test/test_slice.py",start:9931343,end:9939788,audio:0},{filename:"/lib/python3.9/test/test_smtpd.py",start:9939788,end:9981079,audio:0},{filename:"/lib/python3.9/test/test_smtplib.py",start:9981079,end:10039951,audio:0},{filename:"/lib/python3.9/test/test_smtpnet.py",start:10039951,end:10042963,audio:0},{filename:"/lib/python3.9/test/test_sndhdr.py",start:10042963,end:10044423,audio:0},{filename:"/lib/python3.9/test/test_socket.py",start:10044423,end:10297233,audio:0},{filename:"/lib/python3.9/test/test_socketserver.py",start:10297233,end:10315345,audio:0},{filename:"/lib/python3.9/test/test_sort.py",start:10315345,end:10329092,audio:0},{filename:"/lib/python3.9/test/test_source_encoding.py",start:10329092,end:10337279,audio:0},{filename:"/lib/python3.9/test/test_spwd.py",start:10337279,end:10340053,audio:0},{filename:"/lib/python3.9/test/test_sqlite.py",start:10340053,end:10341001,audio:0},{filename:"/lib/python3.9/test/test_ssl.py",start:10341001,end:10549528,audio:0},{filename:"/lib/python3.9/test/test_startfile.py",start:10549528,end:10550852,audio:0},{filename:"/lib/python3.9/test/test_stat.py",start:10550852,end:10559349,audio:0},{filename:"/lib/python3.9/test/test_statistics.py",start:10559349,end:10671081,audio:0},{filename:"/lib/python3.9/test/test_strftime.py",start:10671081,end:10678804,audio:0},{filename:"/lib/python3.9/test/test_string.py",start:10678804,end:10699076,audio:0},{filename:"/lib/python3.9/test/test_string_literals.py",start:10699076,end:10709285,audio:0},{filename:"/lib/python3.9/test/test_stringprep.py",start:10709285,end:10712398,audio:0},{filename:"/lib/python3.9/test/test_strptime.py",start:10712398,end:10747648,audio:0},{filename:"/lib/python3.9/test/test_strtod.py",start:10747648,end:10768185,audio:0},{filename:"/lib/python3.9/test/test_struct.py",start:10768185,end:10804208,audio:0},{filename:"/lib/python3.9/test/test_structmembers.py",start:10804208,end:10809024,audio:0},{filename:"/lib/python3.9/test/test_structseq.py",start:10809024,end:10812988,audio:0},{filename:"/lib/python3.9/test/test_subclassinit.py",start:10812988,end:10821301,audio:0},{filename:"/lib/python3.9/test/test_subprocess.py",start:10821301,end:10977552,audio:0},{filename:"/lib/python3.9/test/test_sunau.py",start:10977552,end:10983677,audio:0},{filename:"/lib/python3.9/test/test_sundry.py",start:10983677,end:10985800,audio:0},{filename:"/lib/python3.9/test/test_super.py",start:10985800,end:10995628,audio:0},{filename:"/lib/python3.9/test/test_support.py",start:10995628,end:11020105,audio:0},{filename:"/lib/python3.9/test/test_symbol.py",start:11020105,end:11022213,audio:0},{filename:"/lib/python3.9/test/test_symtable.py",start:11022213,end:11031519,audio:0},{filename:"/lib/python3.9/test/test_syntax.py",start:11031519,end:11065540,audio:0},{filename:"/lib/python3.9/test/test_sys.py",start:11065540,end:11122266,audio:0},{filename:"/lib/python3.9/test/test_sys_setprofile.py",start:11122266,end:11134871,audio:0},{filename:"/lib/python3.9/test/test_sys_settrace.py",start:11134871,end:11182195,audio:0},{filename:"/lib/python3.9/test/test_sysconfig.py",start:11182195,end:11199617,audio:0},{filename:"/lib/python3.9/test/test_syslog.py",start:11199617,end:11200795,audio:0},{filename:"/lib/python3.9/test/test_tabnanny.py",start:11200795,e
nd:11214554,audio:0},{filename:"/lib/python3.9/test/test_tarfile.py",start:11214554,end:11318435,audio:0},{filename:"/lib/python3.9/test/test_tcl.py",start:11318435,end:11350191,audio:0},{filename:"/lib/python3.9/test/test_telnetlib.py",start:11350191,end:11363245,audio:0},{filename:"/lib/python3.9/test/test_tempfile.py",start:11363245,end:11416930,audio:0},{filename:"/lib/python3.9/test/test_textwrap.py",start:11416930,end:11456700,audio:0},{filename:"/lib/python3.9/test/test_thread.py",start:11456700,end:11465264,audio:0},{filename:"/lib/python3.9/test/test_threadedtempfile.py",start:11465264,end:11467162,audio:0},{filename:"/lib/python3.9/test/test_threading.py",start:11467162,end:11519020,audio:0},{filename:"/lib/python3.9/test/test_threading_local.py",start:11519020,end:11525254,audio:0},{filename:"/lib/python3.9/test/test_threadsignals.py",start:11525254,end:11535588,audio:0},{filename:"/lib/python3.9/test/test_time.py",start:11535588,end:11576197,audio:0},{filename:"/lib/python3.9/test/test_timeit.py",start:11576197,end:11591351,audio:0},{filename:"/lib/python3.9/test/test_timeout.py",start:11591351,end:11602728,audio:0},{filename:"/lib/python3.9/test/test_tix.py",start:11602728,end:11603484,audio:0},{filename:"/lib/python3.9/test/test_tk.py",start:11603484,end:11603846,audio:0},{filename:"/lib/python3.9/test/test_tokenize.py",start:11603846,end:11668547,audio:0},{filename:"/lib/python3.9/test/test_trace.py",start:11668547,end:11689010,audio:0},{filename:"/lib/python3.9/test/test_traceback.py",start:11689010,end:11737994,audio:0},{filename:"/lib/python3.9/test/test_tracemalloc.py",start:11737994,end:11778239,audio:0},{filename:"/lib/python3.9/test/test_ttk_guionly.py",start:11778239,end:11778985,audio:0},{filename:"/lib/python3.9/test/test_ttk_textonly.py",start:11778985,end:11779284,audio:0},{filename:"/lib/python3.9/test/test_tuple.py",start:11779284,end:11798591,audio:0},{filename:"/lib/python3.9/test/test_turtle.py",start:11798591,end:11811682,audio:0},{filename:"/lib/python3.9/test/test_type_comments.py",start:11811682,end:11822460,audio:0},{filename:"/lib/python3.9/test/test_typechecks.py",start:11822460,end:11825075,audio:0},{filename:"/lib/python3.9/test/test_types.py",start:11825075,end:11885587,audio:0},{filename:"/lib/python3.9/test/test_typing.py",start:11885587,end:12023627,audio:0},{filename:"/lib/python3.9/test/test_ucn.py",start:12023627,end:12033352,audio:0},{filename:"/lib/python3.9/test/test_unary.py",start:12033352,end:12035017,audio:0},{filename:"/lib/python3.9/test/test_unicode.py",start:12035017,end:12171334,audio:0},{filename:"/lib/python3.9/test/test_unicode_file.py",start:12171334,end:12177213,audio:0},{filename:"/lib/python3.9/test/test_unicode_file_functions.py",start:12177213,end:12184217,audio:0},{filename:"/lib/python3.9/test/test_unicode_identifiers.py",start:12184217,end:12185201,audio:0},{filename:"/lib/python3.9/test/test_unicodedata.py",start:12185201,end:12201210,audio:0},{filename:"/lib/python3.9/test/test_unittest.py",start:12201210,end:12201496,audio:0},{filename:"/lib/python3.9/test/test_univnewlines.py",start:12201496,end:12205418,audio:0},{filename:"/lib/python3.9/test/test_unpack.py",start:12205418,end:12208504,audio:0},{filename:"/lib/python3.9/test/test_unpack_ex.py",start:12208504,end:12218808,audio:0},{filename:"/lib/python3.9/test/test_unparse.py",start:12218808,end:12237473,audio:0},{filename:"/lib/python3.9/test/test_urllib.py",start:12237473,end:12309069,audio:0},{filename:"/lib/python3.9/test/test_urllib2.py",start:12309069,end:123
88017,audio:0},{filename:"/lib/python3.9/test/test_urllib2_localnet.py",start:12388017,end:12412976,audio:0},{filename:"/lib/python3.9/test/test_urllib2net.py",start:12412976,end:12425982,audio:0},{filename:"/lib/python3.9/test/test_urllib_response.py",start:12425982,end:12427919,audio:0},{filename:"/lib/python3.9/test/test_urllibnet.py",start:12427919,end:12437425,audio:0},{filename:"/lib/python3.9/test/test_urlparse.py",start:12437425,end:12502764,audio:0},{filename:"/lib/python3.9/test/test_userdict.py",start:12502764,end:12510508,audio:0},{filename:"/lib/python3.9/test/test_userlist.py",start:12510508,end:12512524,audio:0},{filename:"/lib/python3.9/test/test_userstring.py",start:12512524,end:12514985,audio:0},{filename:"/lib/python3.9/test/test_utf8_mode.py",start:12514985,end:12524422,audio:0},{filename:"/lib/python3.9/test/test_utf8source.py",start:12524422,end:12525597,audio:0},{filename:"/lib/python3.9/test/test_uu.py",start:12525597,end:12533820,audio:0},{filename:"/lib/python3.9/test/test_uuid.py",start:12533820,end:12574730,audio:0},{filename:"/lib/python3.9/test/test_venv.py",start:12574730,end:12597050,audio:0},{filename:"/lib/python3.9/test/test_wait3.py",start:12597050,end:12598899,audio:0},{filename:"/lib/python3.9/test/test_wait4.py",start:12598899,end:12600088,audio:0},{filename:"/lib/python3.9/test/test_wave.py",start:12600088,end:12606773,audio:0},{filename:"/lib/python3.9/test/test_weakref.py",start:12606773,end:12681012,audio:0},{filename:"/lib/python3.9/test/test_weakset.py",start:12681012,end:12696560,audio:0},{filename:"/lib/python3.9/test/test_webbrowser.py",start:12696560,end:12707282,audio:0},{filename:"/lib/python3.9/test/test_winconsoleio.py",start:12707282,end:12713771,audio:0},{filename:"/lib/python3.9/test/test_winreg.py",start:12713771,end:12735666,audio:0},{filename:"/lib/python3.9/test/test_winsound.py",start:12735666,end:12740343,audio:0},{filename:"/lib/python3.9/test/test_with.py",start:12740343,end:12766957,audio:0},{filename:"/lib/python3.9/test/test_wsgiref.py",start:12766957,end:12797828,audio:0},{filename:"/lib/python3.9/test/test_xdrlib.py",start:12797828,end:12800054,audio:0},{filename:"/lib/python3.9/test/test_xml_dom_minicompat.py",start:12800054,end:12804336,audio:0},{filename:"/lib/python3.9/test/test_xml_etree.py",start:12804336,end:12962471,audio:0},{filename:"/lib/python3.9/test/test_xml_etree_c.py",start:12962471,end:12970722,audio:0},{filename:"/lib/python3.9/test/test_xmlrpc.py",start:12970722,end:13029666,audio:0},{filename:"/lib/python3.9/test/test_xmlrpc_net.py",start:13029666,end:13030681,audio:0},{filename:"/lib/python3.9/test/test_xxtestfuzz.py",start:13030681,end:13031351,audio:0},{filename:"/lib/python3.9/test/test_yield_from.py",start:13031351,end:13062085,audio:0},{filename:"/lib/python3.9/test/test_zipapp.py",start:13062085,end:13078389,audio:0},{filename:"/lib/python3.9/test/test_zipfile.py",start:13078389,end:13198394,audio:0},{filename:"/lib/python3.9/test/test_zipfile64.py",start:13198394,end:13204336,audio:0},{filename:"/lib/python3.9/test/test_zipimport.py",start:13204336,end:13234316,audio:0},{filename:"/lib/python3.9/test/test_zipimport_support.py",start:13234316,end:13245005,audio:0},{filename:"/lib/python3.9/test/test_zlib.py",start:13245005,end:13279797,audio:0},{filename:"/lib/python3.9/test/testcodec.py",start:13279797,end:13280843,audio:0},{filename:"/lib/python3.9/test/testtar.tar",start:13280843,end:13716043,audio:0},{filename:"/lib/python3.9/test/tf_inherit_check.py",start:13716043,end:13716757,audio:0},{fil
ename:"/lib/python3.9/test/time_hashlib.py",start:13716757,end:13719700,audio:0},{filename:"/lib/python3.9/test/tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt",start:13719700,end:13720143,audio:0},{filename:"/lib/python3.9/test/tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt",start:13720143,end:13720445,audio:0},{filename:"/lib/python3.9/test/tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt",start:13720445,end:13720866,audio:0},{filename:"/lib/python3.9/test/tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt",start:13720866,end:13721192,audio:0},{filename:"/lib/python3.9/test/tokenize_tests.txt",start:13721192,end:13723909,audio:0},{filename:"/lib/python3.9/test/win_console_handler.py",start:13723909,end:13725325,audio:0},{filename:"/lib/python3.9/test/xmltests.py",start:13725325,end:13725824,audio:0},{filename:"/lib/python3.9/test/zip_cp437_header.zip",start:13725824,end:13726094,audio:0},{filename:"/lib/python3.9/test/zipdir.zip",start:13726094,end:13726468,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-alaw.aifc",start:13726468,end:13733378,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-pcm16.aiff",start:13733378,end:13746884,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-pcm16.au",start:13746884,end:13760136,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-pcm16.wav",start:13760136,end:13773506,audio:1},{filename:"/lib/python3.9/test/audiodata/pluck-pcm24.aiff",start:13773506,end:13793626,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-pcm24.au",start:13793626,end:13813492,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-pcm24.wav",start:13813492,end:13833476,audio:1},{filename:"/lib/python3.9/test/audiodata/pluck-pcm32.aiff",start:13833476,end:13860210,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-pcm32.au",start:13860210,end:13886690,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-pcm32.wav",start:13886690,end:13913288,audio:1},{filename:"/lib/python3.9/test/audiodata/pluck-pcm8.aiff",start:13913288,end:13920180,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-pcm8.au",start:13920180,end:13926818,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-pcm8.wav",start:13926818,end:13933574,audio:1},{filename:"/lib/python3.9/test/audiodata/pluck-ulaw.aifc",start:13933574,end:13940484,audio:0},{filename:"/lib/python3.9/test/audiodata/pluck-ulaw.au",start:13940484,end:13947122,audio:0},{filename:"/lib/python3.9/test/capath/4e1295a3.0",start:13947122,end:13947936,audio:0},{filename:"/lib/python3.9/test/capath/5ed36f99.0",start:13947936,end:13950505,audio:0},{filename:"/lib/python3.9/test/capath/6e88d7b8.0",start:13950505,end:13951319,audio:0},{filename:"/lib/python3.9/test/capath/99d0fa06.0",start:13951319,end:13953888,audio:0},{filename:"/lib/python3.9/test/capath/b1930218.0",start:13953888,end:13955482,audio:0},{filename:"/lib/python3.9/test/capath/ceff1710.0",start:13955482,end:13957076,audio:0},{filename:"/lib/python3.9/test/data/README",start:13957076,end:13957205,audio:0},{filename:"/lib/python3.9/test/cjkencodings/big5-utf8.txt",start:13957205,end:13957769,audio:0},{filename:"/lib/python3.9/test/cjkencodings/big5.txt",start:13957769,end:13958201,audio:0},{filename:"/lib/python3.9/test/cjkencodings/big5hkscs-utf8.txt",start:13958201,end:13958233,audio:0},{filename:"/lib/python3.9/test/cjkencodings/big5hkscs.txt",start:13958233,end:13958256,audio:0},{filename:"/lib/python3.9/test/cjkencodings/cp949-utf8.txt",start:13958256,end:13958734,audio:0},{filename:"/lib/python3.9/test/cjke
ncodings/cp949.txt",start:13958734,end:13959080,audio:0},{filename:"/lib/python3.9/test/cjkencodings/euc_jisx0213-utf8.txt",start:13959080,end:13960224,audio:0},{filename:"/lib/python3.9/test/cjkencodings/euc_jisx0213.txt",start:13960224,end:13961017,audio:0},{filename:"/lib/python3.9/test/cjkencodings/euc_jp-utf8.txt",start:13961017,end:13962111,audio:0},{filename:"/lib/python3.9/test/cjkencodings/euc_jp.txt",start:13962111,end:13962871,audio:0},{filename:"/lib/python3.9/test/cjkencodings/euc_kr-utf8.txt",start:13962871,end:13963457,audio:0},{filename:"/lib/python3.9/test/cjkencodings/euc_kr.txt",start:13963457,end:13963913,audio:0},{filename:"/lib/python3.9/test/cjkencodings/gb18030-utf8.txt",start:13963913,end:13965040,audio:0},{filename:"/lib/python3.9/test/cjkencodings/gb18030.txt",start:13965040,end:13965904,audio:0},{filename:"/lib/python3.9/test/cjkencodings/gb2312-utf8.txt",start:13965904,end:13966384,audio:0},{filename:"/lib/python3.9/test/cjkencodings/gb2312.txt",start:13966384,end:13966708,audio:0},{filename:"/lib/python3.9/test/cjkencodings/gbk-utf8.txt",start:13966708,end:13967751,audio:0},{filename:"/lib/python3.9/test/cjkencodings/gbk.txt",start:13967751,end:13968506,audio:0},{filename:"/lib/python3.9/test/cjkencodings/hz-utf8.txt",start:13968506,end:13968595,audio:0},{filename:"/lib/python3.9/test/cjkencodings/hz.txt",start:13968595,end:13968678,audio:0},{filename:"/lib/python3.9/test/cjkencodings/iso2022_jp-utf8.txt",start:13968678,end:13969772,audio:0},{filename:"/lib/python3.9/test/cjkencodings/iso2022_jp.txt",start:13969772,end:13970640,audio:0},{filename:"/lib/python3.9/test/cjkencodings/iso2022_kr-utf8.txt",start:13970640,end:13971203,audio:0},{filename:"/lib/python3.9/test/cjkencodings/iso2022_kr.txt",start:13971203,end:13971705,audio:0},{filename:"/lib/python3.9/test/cjkencodings/johab-utf8.txt",start:13971705,end:13972183,audio:0},{filename:"/lib/python3.9/test/cjkencodings/johab.txt",start:13972183,end:13972529,audio:0},{filename:"/lib/python3.9/test/cjkencodings/shift_jis-utf8.txt",start:13972529,end:13973623,audio:0},{filename:"/lib/python3.9/test/cjkencodings/shift_jis.txt",start:13973623,end:13974383,audio:0},{filename:"/lib/python3.9/test/cjkencodings/shift_jisx0213-utf8.txt",start:13974383,end:13975527,audio:0},{filename:"/lib/python3.9/test/cjkencodings/shift_jisx0213.txt",start:13975527,end:13976316,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/abs.decTest",start:13976316,end:13982605,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/add.decTest",start:13982605,end:14122943,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/and.decTest",start:14122943,end:14139307,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/base.decTest",start:14139307,end:14200662,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/clamp.decTest",start:14200662,end:14211671,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/class.decTest",start:14211671,end:14218047,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/compare.decTest",start:14218047,end:14247674,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/comparetotal.decTest",start:14247674,end:14282097,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/comparetotmag.decTest",start:14282097,end:14318226,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/copy.decTest",start:14318226,end:14321602,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/copyabs.decTest",start:14321602,end:14325086,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/copynegate.decTest",sta
rt:14325086,end:14328759,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/copysign.decTest",start:14328759,end:14336137,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddAbs.decTest",start:14336137,end:14341038,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddAdd.decTest",start:14341038,end:14419133,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddAnd.decTest",start:14419133,end:14437752,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddBase.decTest",start:14437752,end:14492209,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddCanonical.decTest",start:14492209,end:14511117,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddClass.decTest",start:14511117,end:14515024,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddCompare.decTest",start:14515024,end:14545306,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddCompareSig.decTest",start:14545306,end:14573714,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddCompareTotal.decTest",start:14573714,end:14604352,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddCompareTotalMag.decTest",start:14604352,end:14636770,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddCopy.decTest",start:14636770,end:14640391,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddCopyAbs.decTest",start:14640391,end:14644120,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddCopyNegate.decTest",start:14644120,end:14648002,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddCopySign.decTest",start:14648002,end:14655634,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddDivide.decTest",start:14655634,end:14703771,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddDivideInt.decTest",start:14703771,end:14723355,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddEncode.decTest",start:14723355,end:14748043,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddFMA.decTest",start:14748043,end:14850223,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddInvert.decTest",start:14850223,end:14860584,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddLogB.decTest",start:14860584,end:14866824,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddMax.decTest",start:14866824,end:14879138,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddMaxMag.decTest",start:14879138,end:14891881,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddMin.decTest",start:14891881,end:14903850,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddMinMag.decTest",start:14903850,end:14915475,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddMinus.decTest",start:14915475,end:14919265,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddMultiply.decTest",start:14919265,end:14948569,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddNextMinus.decTest",start:14948569,end:14955396,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddNextPlus.decTest",start:14955396,end:14962119,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddNextToward.decTest",start:14962119,end:14987109,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddOr.decTest",start:14987109,end:15003132,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddPlus.decTest",start:15003132,end:15006878,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddQuantize.decTest",start:15006878,end:15049371,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddReduce.decTest",start:15049371,end:15056831,audio:0},{filename:"/lib/python3.9/test/decimalt
estdata/ddRemainder.decTest",start:15056831,end:15083818,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddRemainderNear.decTest",start:15083818,end:15114077,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddRotate.decTest",start:15114077,end:15128159,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddSameQuantum.decTest",start:15128159,end:15145700,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddScaleB.decTest",start:15145700,end:15158487,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddShift.decTest",start:15158487,end:15171898,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddSubtract.decTest",start:15171898,end:15207296,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddToIntegral.decTest",start:15207296,end:15219488,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ddXor.decTest",start:15219488,end:15237190,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/decDouble.decTest",start:15237190,end:15239399,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/decQuad.decTest",start:15239399,end:15241606,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/decSingle.decTest",start:15241606,end:15243062,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/divide.decTest",start:15243062,end:15280866,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/divideint.decTest",start:15280866,end:15301302,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqAbs.decTest",start:15301302,end:15306577,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqAdd.decTest",start:15306577,end:15395774,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqAnd.decTest",start:15395774,end:15424897,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqBase.decTest",start:15424897,end:15483852,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqCanonical.decTest",start:15483852,end:15511171,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqClass.decTest",start:15511171,end:15515191,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqCompare.decTest",start:15515191,end:15548313,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqCompareSig.decTest",start:15548313,end:15578008,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqCompareTotal.decTest",start:15578008,end:15608854,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqCompareTotalMag.decTest",start:15608854,end:15641480,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqCopy.decTest",start:15641480,end:15645467,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqCopyAbs.decTest",start:15645467,end:15649568,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqCopyNegate.decTest",start:15649568,end:15653816,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqCopySign.decTest",start:15653816,end:15662044,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqDivide.decTest",start:15662044,end:15717146,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqDivideInt.decTest",start:15717146,end:15736972,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqEncode.decTest",start:15736972,end:15768402,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqFMA.decTest",start:15768402,end:15898392,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqInvert.decTest",start:15898392,end:15914516,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqLogB.decTest",start:15914516,end:15920896,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqMax.decTest",start:15920896,end:15933245,audio
:0},{filename:"/lib/python3.9/test/decimaltestdata/dqMaxMag.decTest",start:15933245,end:15946034,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqMin.decTest",start:15946034,end:15958038,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqMinMag.decTest",start:15958038,end:15969687,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqMinus.decTest",start:15969687,end:15973843,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqMultiply.decTest",start:15973843,end:16006336,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqNextMinus.decTest",start:16006336,end:16014987,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqNextPlus.decTest",start:16014987,end:16023514,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqNextToward.decTest",start:16023514,end:16053240,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqOr.decTest",start:16053240,end:16083857,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqPlus.decTest",start:16083857,end:16087969,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqQuantize.decTest",start:16087969,end:16131061,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqReduce.decTest",start:16131061,end:16138881,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqRemainder.decTest",start:16138881,end:16166444,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqRemainderNear.decTest",start:16166444,end:16197733,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqRotate.decTest",start:16197733,end:16218713,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqSameQuantum.decTest",start:16218713,end:16236858,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqScaleB.decTest",start:16236858,end:16252917,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqShift.decTest",start:16252917,end:16272353,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqSubtract.decTest",start:16272353,end:16314281,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqToIntegral.decTest",start:16314281,end:16326505,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dqXor.decTest",start:16326505,end:16354768,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dsBase.decTest",start:16354768,end:16404334,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/dsEncode.decTest",start:16404334,end:16420220,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/exp.decTest",start:16420220,end:16459661,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/extra.decTest",start:16459661,end:16552173,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/fma.decTest",start:16552173,end:16747499,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/inexact.decTest",start:16747499,end:16757991,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/invert.decTest",start:16757991,end:16766277,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/ln.decTest",start:16766277,end:16801802,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/log10.decTest",start:16801802,end:16834498,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/logb.decTest",start:16834498,end:16841817,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/max.decTest",start:16841817,end:16857789,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/maxmag.decTest",start:16857789,end:16875141,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/min.decTest",start:16875141,end:16890831,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/minmag.decTest",start:16890831,end:16906269,audio:0},{filen
ame:"/lib/python3.9/test/decimaltestdata/minus.decTest",start:16906269,end:16913694,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/multiply.decTest",start:16913694,end:16952008,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/nextminus.decTest",start:16952008,end:16958950,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/nextplus.decTest",start:16958950,end:16965873,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/nexttoward.decTest",start:16965873,end:16991097,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/or.decTest",start:16991097,end:17006954,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/plus.decTest",start:17006954,end:17014836,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/power.decTest",start:17014836,end:17109817,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/powersqrt.decTest",start:17109817,end:17268472,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/quantize.decTest",start:17268472,end:17315754,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/randomBound32.decTest",start:17315754,end:17620260,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/randoms.decTest",start:17620260,end:17911333,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/reduce.decTest",start:17911333,end:17920652,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/remainder.decTest",start:17920652,end:17947776,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/remainderNear.decTest",start:17947776,end:17972794,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/rescale.decTest",start:17972794,end:18008051,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/rotate.decTest",start:18008051,end:18019939,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/rounding.decTest",start:18019939,end:18083711,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/samequantum.decTest",start:18083711,end:18099913,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/scaleb.decTest",start:18099913,end:18109825,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/shift.decTest",start:18109825,end:18121497,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/squareroot.decTest",start:18121497,end:18313956,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/subtract.decTest",start:18313956,end:18358261,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/testall.decTest",start:18358261,end:18360992,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/tointegral.decTest",start:18360992,end:18369856,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/tointegralx.decTest",start:18369856,end:18381716,audio:0},{filename:"/lib/python3.9/test/decimaltestdata/xor.decTest",start:18381716,end:18398045,audio:0},{filename:"/lib/python3.9/test/xmltestdata/expat224_utf8_bug.xml",start:18398045,end:18399081,audio:0},{filename:"/lib/python3.9/test/xmltestdata/simple-ns.xml",start:18399081,end:18399233,audio:0},{filename:"/lib/python3.9/test/xmltestdata/simple.xml",start:18399233,end:18399355,audio:0},{filename:"/lib/python3.9/test/xmltestdata/test.xml",start:18399355,end:18400743,audio:0},{filename:"/lib/python3.9/test/xmltestdata/test.xml.out",start:18400743,end:18402130,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/README",start:18402130,end:18403996,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/c14nComment.xml",start:18403996,end:18404249,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/c14nDefault.xml",start:18404249,end:18404404,audio:0},{filename:"/lib/python3.9/test/xmlt
estdata/c14n-20/c14nPrefix.xml",start:18404404,end:18404661,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/c14nPrefixQname.xml",start:18404661,end:18405047,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/c14nPrefixQnameXpathElem.xml",start:18405047,end:18405477,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/c14nQname.xml",start:18405477,end:18405807,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/c14nQnameElem.xml",start:18405807,end:18406097,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/c14nQnameXpathElem.xml",start:18406097,end:18406471,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/c14nTrim.xml",start:18406471,end:18406722,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/doc.dtd",start:18406722,end:18406790,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/doc.xsl",start:18406790,end:18406943,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inC14N1.xml",start:18406943,end:18407167,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inC14N2.xml",start:18407167,end:18407337,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inC14N3.xml",start:18407337,end:18407909,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inC14N4.xml",start:18407909,end:18408425,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inC14N5.xml",start:18408425,end:18408740,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inC14N6.xml",start:18408740,end:18408802,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inNsContent.xml",start:18408802,end:18409170,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inNsDefault.xml",start:18409170,end:18409255,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inNsPushdown.xml",start:18409255,end:18409382,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inNsRedecl.xml",start:18409382,end:18409559,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inNsSort.xml",start:18409559,end:18409732,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inNsSuperfluous.xml",start:18409732,end:18409926,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/inNsXml.xml",start:18409926,end:18410108,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N1_c14nComment.xml",start:18410108,end:18410264,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N1_c14nDefault.xml",start:18410264,end:18410364,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N2_c14nDefault.xml",start:18410364,end:18410533,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N2_c14nTrim.xml",start:18410533,end:18410632,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N3_c14nDefault.xml",start:18410632,end:18411037,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N3_c14nPrefix.xml",start:18411037,end:18411515,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N3_c14nTrim.xml",start:18411515,end:18411844,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N4_c14nDefault.xml",start:18411844,end:18412275,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N4_c14nTrim.xml",start:18412275,end:18412677,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N5_c14nDefault.xml",start:18412677,end:18412726,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inC14N5_c14nTrim.xml",start:18412726,end:18412770,audio:0},{filename:"/lib/python3.9/tes
t/xmltestdata/c14n-20/out_inC14N6_c14nDefault.xml",start:18412770,end:18412783,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsContent_c14nDefault.xml",start:18412783,end:18413008,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsContent_c14nPrefixQnameXpathElem.xml",start:18413008,end:18413336,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsContent_c14nQnameElem.xml",start:18413336,end:18413606,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsContent_c14nQnameXpathElem.xml",start:18413606,end:18413950,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsDefault_c14nDefault.xml",start:18413950,end:18414022,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsDefault_c14nPrefix.xml",start:18414022,end:18414116,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsPushdown_c14nDefault.xml",start:18414116,end:18414308,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsPushdown_c14nPrefix.xml",start:18414308,end:18414516,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsRedecl_c14nDefault.xml",start:18414516,end:18414696,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsRedecl_c14nPrefix.xml",start:18414696,end:18414869,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsSort_c14nDefault.xml",start:18414869,end:18415055,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsSort_c14nPrefix.xml",start:18415055,end:18415255,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsSuperfluous_c14nDefault.xml",start:18415255,end:18415442,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsSuperfluous_c14nPrefix.xml",start:18415442,end:18415565,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsXml_c14nDefault.xml",start:18415565,end:18415701,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsXml_c14nPrefix.xml",start:18415701,end:18415850,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsXml_c14nPrefixQname.xml",start:18415850,end:18416042,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/out_inNsXml_c14nQname.xml",start:18416042,end:18416223,audio:0},{filename:"/lib/python3.9/test/xmltestdata/c14n-20/world.txt",start:18416223,end:18416228,audio:0},{filename:"/lib/python3.9/test/dtracedata/assert_usable.d",start:18416228,end:18416283,audio:0},{filename:"/lib/python3.9/test/dtracedata/assert_usable.stp",start:18416283,end:18416337,audio:0},{filename:"/lib/python3.9/test/dtracedata/call_stack.d",start:18416337,end:18416994,audio:0},{filename:"/lib/python3.9/test/dtracedata/call_stack.d.expected",start:18416994,end:18417791,audio:0},{filename:"/lib/python3.9/test/dtracedata/call_stack.py",start:18417791,end:18418283,audio:0},{filename:"/lib/python3.9/test/dtracedata/call_stack.stp",start:18418283,end:18419090,audio:0},{filename:"/lib/python3.9/test/dtracedata/call_stack.stp.expected",start:18419090,end:18419696,audio:0},{filename:"/lib/python3.9/test/dtracedata/gc.d",start:18419696,end:18419993,audio:0},{filename:"/lib/python3.9/test/dtracedata/gc.d.expected",start:18419993,end:18420077,audio:0},{filename:"/lib/python3.9/test/dtracedata/gc.py",start:18420077,end:18420232,audio:0},{filename:"/lib/python3.9/test/dtracedata/gc.stp",start:18420232,end:18420676,audio:0},{filename:"/lib/python3.9/test/dtracedata/gc.stp.expected",start:18420676,end:18420768,audio:0},{filename:"/lib/python3.9/test/dtracedata/instance.py",s
tart:18420768,end:18421085,audio:0},{filename:"/lib/python3.9/test/dtracedata/line.d",start:18421085,end:18421264,audio:0},{filename:"/lib/python3.9/test/dtracedata/line.d.expected",start:18421264,end:18421770,audio:0},{filename:"/lib/python3.9/test/dtracedata/line.py",start:18421770,end:18422063,audio:0},{filename:"/lib/python3.9/test/eintrdata/eintr_tester.py",start:18422063,end:18440091,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.bmp",start:18440091,end:18441253,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.exr",start:18441253,end:18443888,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.gif",start:18443888,end:18444498,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.jpg",start:18444498,end:18445041,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.pbm",start:18445041,end:18445082,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.pgm",start:18445082,end:18445351,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.png",start:18445351,end:18446371,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.ppm",start:18446371,end:18447152,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.ras",start:18447152,end:18448208,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.sgi",start:18448208,end:18450175,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.tiff",start:18450175,end:18451501,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.webp",start:18451501,end:18451933,audio:0},{filename:"/lib/python3.9/test/imghdrdata/python.xbm",start:18451933,end:18452215,audio:0},{filename:"/lib/python3.9/test/libregrtest/__init__.py",start:18452215,end:18452334,audio:0},{filename:"/lib/python3.9/test/libregrtest/cmdline.py",start:18452334,end:18471415,audio:0},{filename:"/lib/python3.9/test/libregrtest/main.py",start:18471415,end:18496168,audio:0},{filename:"/lib/python3.9/test/libregrtest/pgo.py",start:18496168,end:18497507,audio:0},{filename:"/lib/python3.9/test/libregrtest/refleak.py",start:18497507,end:18505703,audio:0},{filename:"/lib/python3.9/test/libregrtest/runtest.py",start:18505703,end:18516242,audio:0},{filename:"/lib/python3.9/test/libregrtest/runtest_mp.py",start:18516242,end:18531887,audio:0},{filename:"/lib/python3.9/test/libregrtest/save_env.py",start:18531887,end:18543703,audio:0},{filename:"/lib/python3.9/test/libregrtest/setup.py",start:18543703,end:18548093,audio:0},{filename:"/lib/python3.9/test/libregrtest/utils.py",start:18548093,end:18550191,audio:0},{filename:"/lib/python3.9/test/libregrtest/win_utils.py",start:18550191,end:18556757,audio:0},{filename:"/lib/python3.9/test/subprocessdata/fd_status.py",start:18556757,end:18557592,audio:0},{filename:"/lib/python3.9/test/subprocessdata/input_reader.py",start:18557592,end:18557722,audio:0},{filename:"/lib/python3.9/test/subprocessdata/qcat.py",start:18557722,end:18557881,audio:0},{filename:"/lib/python3.9/test/subprocessdata/qgrep.py",start:18557881,end:18558134,audio:0},{filename:"/lib/python3.9/test/subprocessdata/sigchild_ignore.py",start:18558134,end:18558891,audio:0},{filename:"/lib/python3.9/test/sndhdrdata/README",start:18558891,end:18559088,audio:0},{filename:"/lib/python3.9/test/sndhdrdata/sndhdr.8svx",start:18559088,end:18559198,audio:0},{filename:"/lib/python3.9/test/sndhdrdata/sndhdr.aifc",start:18559198,end:18559304,audio:0},{filename:"/lib/python3.9/test/sndhdrdata/sndhdr.aiff",start:18559304,end:18559412,audio:0},{filename:"/lib/python3.9/test/sndhdrdata/sndhdr.au",start:18559412,end:18559476,audio:0},{filename:"/lib/python
3.9/test/sndhdrdata/sndhdr.hcom",start:18559476,end:18559732,audio:0},{filename:"/lib/python3.9/test/sndhdrdata/sndhdr.sndt",start:18559732,end:18559861,audio:0},{filename:"/lib/python3.9/test/sndhdrdata/sndhdr.voc",start:18559861,end:18559924,audio:0},{filename:"/lib/python3.9/test/sndhdrdata/sndhdr.wav",start:18559924,end:18559988,audio:1},{filename:"/lib/python3.9/test/support/__init__.py",start:18559988,end:18668901,audio:0},{filename:"/lib/python3.9/test/support/bytecode_helper.py",start:18668901,end:18670509,audio:0},{filename:"/lib/python3.9/test/support/hashlib_helper.py",start:18670509,end:18671787,audio:0},{filename:"/lib/python3.9/test/support/logging_helper.py",start:18671787,end:18672703,audio:0},{filename:"/lib/python3.9/test/support/script_helper.py",start:18672703,end:18683378,audio:0},{filename:"/lib/python3.9/test/support/socket_helper.py",start:18683378,end:18694826,audio:0},{filename:"/lib/python3.9/test/support/testresult.py",start:18694826,end:18701495,audio:0},{filename:"/lib/python3.9/test/tracedmodules/__init__.py",start:18701495,end:18701698,audio:0},{filename:"/lib/python3.9/test/tracedmodules/testmod.py",start:18701698,end:18701841,audio:0},{filename:"/lib/python3.9/test/encoded_modules/__init__.py",start:18701841,end:18703115,audio:0},{filename:"/lib/python3.9/test/encoded_modules/module_iso_8859_1.py",start:18703115,end:18703353,audio:0},{filename:"/lib/python3.9/test/encoded_modules/module_koi8_r.py",start:18703353,end:18703466,audio:0},{filename:"/lib/python3.9/test/test_import/__init__.py",start:18703466,end:18755769,audio:0},{filename:"/lib/python3.9/test/test_import/__main__.py",start:18755769,end:18755820,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/basic.py",start:18755820,end:18755898,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/basic2.py",start:18755898,end:18755918,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/binding.py",start:18755918,end:18755985,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/binding2.py",start:18755985,end:18756050,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/from_cycle1.py",start:18756050,end:18756083,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/from_cycle2.py",start:18756083,end:18756116,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/indirect.py",start:18756116,end:18756144,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/rebinding.py",start:18756144,end:18756266,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/rebinding2.py",start:18756266,end:18756332,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/source.py",start:18756332,end:18756359,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/subpackage.py",start:18756359,end:18756438,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/use.py",start:18756438,end:18756471,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/util.py",start:18756471,end:18756492,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/subpkg/subpackage2.py",start:18756492,end:18756542,audio:0},{filename:"/lib/python3.9/test/test_import/data/circular_imports/subpkg/util.py",start:18756542,end:18756563,audio:0},{filename:"/lib/python3.9/test/test_import/data/package/__init__.py",start:18756563,end:18756606,audio:0},{filename:"/lib/python3.9/test/test_imp
ort/data/package/submodule.py",start:18756606,end:18756606,audio:0},{filename:"/lib/python3.9/test/test_import/data/package2/submodule1.py",start:18756606,end:18756677,audio:0},{filename:"/lib/python3.9/test/test_import/data/package2/submodule2.py",start:18756677,end:18756677,audio:0},{filename:"/lib/python3.9/test/test_import/data/unwritable/__init__.py",start:18756677,end:18757022,audio:0},{filename:"/lib/python3.9/test/test_import/data/unwritable/x.py",start:18757022,end:18757022,audio:0},{filename:"/lib/python3.9/test/test_importlib/__init__.py",start:18757022,end:18757164,audio:0},{filename:"/lib/python3.9/test/test_importlib/__main__.py",start:18757164,end:18757222,audio:0},{filename:"/lib/python3.9/test/test_importlib/abc.py",start:18757222,end:18759494,audio:0},{filename:"/lib/python3.9/test/test_importlib/fixtures.py",start:18759494,end:18765394,audio:0},{filename:"/lib/python3.9/test/test_importlib/stubs.py",start:18765394,end:18765627,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_abc.py",start:18765627,end:18799891,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_api.py",start:18799891,end:18818694,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_files.py",start:18818694,end:18819693,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_lazy.py",start:18819693,end:18824622,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_locks.py",start:18824622,end:18829282,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_main.py",start:18829282,end:18837680,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_metadata_api.py",start:18837680,end:18842809,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_namespace_pkgs.py",start:18842809,end:18853630,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_open.py",start:18853630,end:18855882,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_path.py",start:18855882,end:18857132,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_pkg_import.py",start:18857132,end:18859861,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_read.py",start:18859861,end:18861894,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_resource.py",start:18861894,end:18870459,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_spec.py",start:18870459,end:18901538,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_threaded_import.py",start:18901538,end:18911207,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_util.py",start:18911207,end:18946710,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_windows.py",start:18946710,end:18952675,audio:0},{filename:"/lib/python3.9/test/test_importlib/test_zip.py",start:18952675,end:18955205,audio:0},{filename:"/lib/python3.9/test/test_importlib/threaded_import_hangers.py",start:18955205,end:18956689,audio:0},{filename:"/lib/python3.9/test/test_importlib/util.py",start:18956689,end:18975166,audio:0},{filename:"/lib/python3.9/test/test_importlib/builtin/__init__.py",start:18975166,end:18975308,audio:0},{filename:"/lib/python3.9/test/test_importlib/builtin/__main__.py",start:18975308,end:18975366,audio:0},{filename:"/lib/python3.9/test/test_importlib/builtin/test_finder.py",start:18975366,end:18978257,audio:0},{filename:"/lib/python3.9/test/test_importlib/builtin/test_loader.py",start:18978257,end:18981998,audio:0},{filename:"/lib/python3.9/test/test_importlib/data/__init__.py",start:18981998,end:18981998,audio:0},{filename:"/lib/python3.9/test/test_importlib/data/example-21.12-py3-none
-any.whl",start:18981998,end:18983453,audio:0},{filename:"/lib/python3.9/test/test_importlib/data/example-21.12-py3.6.egg",start:18983453,end:18984950,audio:0},{filename:"/lib/python3.9/test/test_importlib/data01/__init__.py",start:18984950,end:18984950,audio:0},{filename:"/lib/python3.9/test/test_importlib/data01/binary.file",start:18984950,end:18984954,audio:0},{filename:"/lib/python3.9/test/test_importlib/data01/utf-16.file",start:18984954,end:18984998,audio:0},{filename:"/lib/python3.9/test/test_importlib/data01/utf-8.file",start:18984998,end:18985018,audio:0},{filename:"/lib/python3.9/test/test_importlib/data01/subdirectory/__init__.py",start:18985018,end:18985018,audio:0},{filename:"/lib/python3.9/test/test_importlib/data01/subdirectory/binary.file",start:18985018,end:18985022,audio:0},{filename:"/lib/python3.9/test/test_importlib/data02/__init__.py",start:18985022,end:18985022,audio:0},{filename:"/lib/python3.9/test/test_importlib/data02/one/__init__.py",start:18985022,end:18985022,audio:0},{filename:"/lib/python3.9/test/test_importlib/data02/one/resource1.txt",start:18985022,end:18985035,audio:0},{filename:"/lib/python3.9/test/test_importlib/data02/two/__init__.py",start:18985035,end:18985035,audio:0},{filename:"/lib/python3.9/test/test_importlib/data02/two/resource2.txt",start:18985035,end:18985048,audio:0},{filename:"/lib/python3.9/test/test_importlib/data03/__init__.py",start:18985048,end:18985048,audio:0},{filename:"/lib/python3.9/test/test_importlib/data03/namespace/resource1.txt",start:18985048,end:18985048,audio:0},{filename:"/lib/python3.9/test/test_importlib/data03/namespace/portion1/__init__.py",start:18985048,end:18985048,audio:0},{filename:"/lib/python3.9/test/test_importlib/data03/namespace/portion2/__init__.py",start:18985048,end:18985048,audio:0},{filename:"/lib/python3.9/test/test_importlib/extension/__init__.py",start:18985048,end:18985190,audio:0},{filename:"/lib/python3.9/test/test_importlib/extension/__main__.py",start:18985190,end:18985248,audio:0},{filename:"/lib/python3.9/test/test_importlib/extension/test_case_sensitivity.py",start:18985248,end:18987003,audio:0},{filename:"/lib/python3.9/test/test_importlib/extension/test_finder.py",start:18987003,end:18988275,audio:0},{filename:"/lib/python3.9/test/test_importlib/extension/test_loader.py",start:18988275,end:18999155,audio:0},{filename:"/lib/python3.9/test/test_importlib/extension/test_path_hook.py",start:18999155,end:19000019,audio:0},{filename:"/lib/python3.9/test/test_importlib/frozen/__init__.py",start:19000019,end:19000161,audio:0},{filename:"/lib/python3.9/test/test_importlib/frozen/__main__.py",start:19000161,end:19000219,audio:0},{filename:"/lib/python3.9/test/test_importlib/frozen/test_finder.py",start:19000219,end:19002324,audio:0},{filename:"/lib/python3.9/test/test_importlib/frozen/test_loader.py",start:19002324,end:19011663,audio:0},{filename:"/lib/python3.9/test/test_importlib/import_/__init__.py",start:19011663,end:19011805,audio:0},{filename:"/lib/python3.9/test/test_importlib/import_/__main__.py",start:19011805,end:19011863,audio:0},{filename:"/lib/python3.9/test/test_importlib/import_/test___loader__.py",start:19011863,end:19013724,audio:0},{filename:"/lib/python3.9/test/test_importlib/import_/test___package__.py",start:19013724,end:19019362,audio:0},{filename:"/lib/python3.9/test/test_importlib/import_/test_api.py",start:19019362,end:19023150,audio:0},{filename:"/lib/python3.9/test/test_importlib/import_/test_caching.py",start:19023150,end:19026749,audio:0},{filename:"/lib/python3.9/test/te
st_importlib/import_/test_fromlist.py",start:19026749,end:19034275,audio:0},{filename:"/lib/python3.9/test/test_importlib/import_/test_meta_path.py",start:19034275,end:19038592,audio:0},{filename:"/lib/python3.9/test/test_importlib/import_/test_packages.py",start:19038592,end:19043136,audio:0},{filename:"/lib/python3.9/test/test_importlib/import_/test_path.py",start:19043136,end:19053679,audio:0},{filename:"/lib/python3.9/test/test_importlib/import_/test_relative_imports.py",start:19053679,end:19063144,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/missing_directory.zip",start:19063144,end:19063659,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/nested_portion1.zip",start:19063659,end:19064215,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/top_level_portion1.zip",start:19064215,end:19064547,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/both_portions/foo/one.py",start:19064547,end:19064578,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/both_portions/foo/two.py",start:19064578,end:19064609,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/module_and_namespace_package/a_test.py",start:19064609,end:19064628,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/module_and_namespace_package/a_test/empty",start:19064628,end:19064628,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/not_a_namespace_pkg/foo/__init__.py",start:19064628,end:19064628,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/not_a_namespace_pkg/foo/one.py",start:19064628,end:19064654,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/portion1/foo/one.py",start:19064654,end:19064680,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/portion2/foo/two.py",start:19064680,end:19064706,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/project1/parent/child/one.py",start:19064706,end:19064732,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/project2/parent/child/two.py",start:19064732,end:19064758,audio:0},{filename:"/lib/python3.9/test/test_importlib/namespace_pkgs/project3/parent/child/three.py",start:19064758,end:19064786,audio:0},{filename:"/lib/python3.9/test/test_importlib/partial/cfimport.py",start:19064786,end:19065611,audio:0},{filename:"/lib/python3.9/test/test_importlib/partial/pool_in_threads.py",start:19065611,end:19066070,audio:0},{filename:"/lib/python3.9/test/test_importlib/source/__init__.py",start:19066070,end:19066212,audio:0},{filename:"/lib/python3.9/test/test_importlib/source/__main__.py",start:19066212,end:19066270,audio:0},{filename:"/lib/python3.9/test/test_importlib/source/test_case_sensitivity.py",start:19066270,end:19069675,audio:0},{filename:"/lib/python3.9/test/test_importlib/source/test_file_loader.py",start:19069675,end:19102127,audio:0},{filename:"/lib/python3.9/test/test_importlib/source/test_finder.py",start:19102127,end:19110902,audio:0},{filename:"/lib/python3.9/test/test_importlib/source/test_path_hook.py",start:19110902,end:19112092,audio:0},{filename:"/lib/python3.9/test/test_importlib/source/test_source_encoding.py",start:19112092,end:19117422,audio:0},{filename:"/lib/python3.9/test/test_importlib/zipdata01/__init__.py",start:19117422,end:19117422,audio:0},{filename:"/lib/python3.9/test/test_importlib/zipdata01/ziptestdata.zip",start:19117422,end:19118298,audio:0},{filename:"/lib/python3.9/test/test_importlib/zipdata02/__init__.p
y",start:19118298,end:19118298,audio:0},{filename:"/lib/python3.9/test/test_importlib/zipdata02/ziptestdata.zip",start:19118298,end:19118996,audio:0},{filename:"/lib/python3.9/test/test_zoneinfo/__init__.py",start:19118996,end:19119025,audio:0},{filename:"/lib/python3.9/test/test_zoneinfo/__main__.py",start:19119025,end:19119078,audio:0},{filename:"/lib/python3.9/test/test_zoneinfo/_support.py",start:19119078,end:19122264,audio:0},{filename:"/lib/python3.9/test/test_zoneinfo/test_zoneinfo.py",start:19122264,end:19196005,audio:0},{filename:"/lib/python3.9/test/test_zoneinfo/data/update_test_data.py",start:19196005,end:19199171,audio:0},{filename:"/lib/python3.9/test/test_zoneinfo/data/zoneinfo_data.json",start:19199171,end:19212066,audio:0},{filename:"/lib/python3.9/test/ziptestdata/README.md",start:19212066,end:19213074,audio:0},{filename:"/lib/python3.9/test/ziptestdata/exe_with_z64",start:19213074,end:19214052,audio:0},{filename:"/lib/python3.9/test/ziptestdata/exe_with_zip",start:19214052,end:19215042,audio:0},{filename:"/lib/python3.9/test/ziptestdata/header.sh",start:19215042,end:19215755,audio:0},{filename:"/lib/python3.9/test/ziptestdata/testdata_module_inside_zip.py",start:19215755,end:19215824,audio:0},{filename:"/lib/python3.9/test/test_asyncio/__init__.py",start:19215824,end:19216068,audio:0},{filename:"/lib/python3.9/test/test_asyncio/__main__.py",start:19216068,end:19216126,audio:0},{filename:"/lib/python3.9/test/test_asyncio/echo.py",start:19216126,end:19216274,audio:0},{filename:"/lib/python3.9/test/test_asyncio/echo2.py",start:19216274,end:19216397,audio:0},{filename:"/lib/python3.9/test/test_asyncio/echo3.py",start:19216397,end:19216673,audio:0},{filename:"/lib/python3.9/test/test_asyncio/functional.py",start:19216673,end:19224392,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_asyncio_waitfor.py",start:19224392,end:19225880,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_base_events.py",start:19225880,end:19306150,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_buffered_proto.py",start:19306150,end:19308487,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_context.py",start:19308487,end:19309507,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_events.py",start:19309507,end:19412737,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_futures.py",start:19412737,end:19440639,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_futures2.py",start:19440639,end:19441333,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_locks.py",start:19441333,end:19472369,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_pep492.py",start:19472369,end:19478414,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_proactor_events.py",start:19478414,end:19514338,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_protocols.py",start:19514338,end:19516382,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_queues.py",start:19516382,end:19538049,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_runners.py",start:19538049,end:19543227,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_selector_events.py",start:19543227,end:19591524,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_sendfile.py",start:19591524,end:19612090,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_server.py",start:19612090,end:19616138,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_sock_lowlevel.py",start:19616138,end:19634433,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_sslproto.py",s
tart:19634433,end:19661084,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_streams.py",start:19661084,end:19698205,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_subprocess.py",start:19698205,end:19724787,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_tasks.py",start:19724787,end:19845494,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_threads.py",start:19845494,end:19847970,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_transports.py",start:19847970,end:19851589,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_unix_events.py",start:19851589,end:19919485,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_windows_events.py",start:19919485,end:19928621,audio:0},{filename:"/lib/python3.9/test/test_asyncio/test_windows_utils.py",start:19928621,end:19932784,audio:0},{filename:"/lib/python3.9/test/test_asyncio/utils.py",start:19932784,end:19950201,audio:0},{filename:"/lib/python3.9/test/test_email/__init__.py",start:19950201,end:19956524,audio:0},{filename:"/lib/python3.9/test/test_email/__main__.py",start:19956524,end:19956596,audio:0},{filename:"/lib/python3.9/test/test_email/test__encoded_words.py",start:19956596,end:19963330,audio:0},{filename:"/lib/python3.9/test/test_email/test__header_value_parser.py",start:19963330,end:20089605,audio:0},{filename:"/lib/python3.9/test/test_email/test_asian_codecs.py",start:20089605,end:20092750,audio:0},{filename:"/lib/python3.9/test/test_email/test_contentmanager.py",start:20092750,end:20127419,audio:0},{filename:"/lib/python3.9/test/test_email/test_defect_handling.py",start:20127419,end:20139387,audio:0},{filename:"/lib/python3.9/test/test_email/test_email.py",start:20139387,end:20350918,audio:0},{filename:"/lib/python3.9/test/test_email/test_generator.py",start:20350918,end:20363093,audio:0},{filename:"/lib/python3.9/test/test_email/test_headerregistry.py",start:20363093,end:20427329,audio:0},{filename:"/lib/python3.9/test/test_email/test_inversion.py",start:20427329,end:20429404,audio:0},{filename:"/lib/python3.9/test/test_email/test_message.py",start:20429404,end:20462731,audio:0},{filename:"/lib/python3.9/test/test_email/test_parser.py",start:20462731,end:20467064,audio:0},{filename:"/lib/python3.9/test/test_email/test_pickleable.py",start:20467064,end:20469613,audio:0},{filename:"/lib/python3.9/test/test_email/test_policy.py",start:20469613,end:20485492,audio:0},{filename:"/lib/python3.9/test/test_email/test_utils.py",start:20485492,end:20492154,audio:0},{filename:"/lib/python3.9/test/test_email/torture_test.py",start:20492154,end:20495761,audio:0},{filename:"/lib/python3.9/test/test_email/data/PyBanner048.gif",start:20495761,end:20496715,audio:0},{filename:"/lib/python3.9/test/test_email/data/audiotest.au",start:20496715,end:20524859,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_01.txt",start:20524859,end:20525318,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_02.txt",start:20525318,end:20528130,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_03.txt",start:20528130,end:20528496,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_04.txt",start:20528496,end:20529457,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_05.txt",start:20529457,end:20530015,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_06.txt",start:20530015,end:20531056,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_07.txt",start:20531056,end:20536283,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_08.txt",start:20536283,end:20
536737,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_09.txt",start:20536737,end:20537169,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_10.txt",start:20537169,end:20538053,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_11.txt",start:20538053,end:20538195,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_12.txt",start:20538195,end:20538839,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_12a.txt",start:20538839,end:20539485,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_13.txt",start:20539485,end:20544852,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_14.txt",start:20544852,end:20545493,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_15.txt",start:20545493,end:20546799,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_16.txt",start:20546799,end:20552002,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_17.txt",start:20552002,end:20552332,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_18.txt",start:20552332,end:20552562,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_19.txt",start:20552562,end:20553319,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_20.txt",start:20553319,end:20553826,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_21.txt",start:20553826,end:20554202,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_22.txt",start:20554202,end:20556096,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_23.txt",start:20556096,end:20556235,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_24.txt",start:20556235,end:20556392,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_25.txt",start:20556392,end:20561514,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_26.txt",start:20561514,end:20563617,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_27.txt",start:20563617,end:20564195,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_28.txt",start:20564195,end:20564575,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_29.txt",start:20564575,end:20565158,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_30.txt",start:20565158,end:20565480,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_31.txt",start:20565480,end:20565680,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_32.txt",start:20565680,end:20566098,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_33.txt",start:20566098,end:20566848,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_34.txt",start:20566848,end:20567148,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_35.txt",start:20567148,end:20567284,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_36.txt",start:20567284,end:20568100,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_37.txt",start:20568100,end:20568309,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_38.txt",start:20568309,end:20570857,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_39.txt",start:20570857,end:20572812,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_40.txt",start:20572812,end:20573009,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_41.txt",start:20573009,end:20573194,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_42.txt",start:20573194,end:20573507,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_43.txt",start:20573507,end:20582673,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_44.txt",start:20582673,end:2058
3568,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_45.txt",start:20583568,end:20584533,audio:0},{filename:"/lib/python3.9/test/test_email/data/msg_46.txt",start:20584533,end:20585349,audio:0},{filename:"/lib/python3.9/test/test_json/__init__.py",start:20585349,end:20587485,audio:0},{filename:"/lib/python3.9/test/test_json/__main__.py",start:20587485,end:20587556,audio:0},{filename:"/lib/python3.9/test/test_json/test_decode.py",start:20587556,end:20591775,audio:0},{filename:"/lib/python3.9/test/test_json/test_default.py",start:20591775,end:20592065,audio:0},{filename:"/lib/python3.9/test/test_json/test_dump.py",start:20592065,end:20594474,audio:0},{filename:"/lib/python3.9/test/test_json/test_encode_basestring_ascii.py",start:20594474,end:20596740,audio:0},{filename:"/lib/python3.9/test/test_json/test_enum.py",start:20596740,end:20600774,audio:0},{filename:"/lib/python3.9/test/test_json/test_fail.py",start:20600774,end:20609815,audio:0},{filename:"/lib/python3.9/test/test_json/test_float.py",start:20609815,end:20611026,audio:0},{filename:"/lib/python3.9/test/test_json/test_indent.py",start:20611026,end:20612850,audio:0},{filename:"/lib/python3.9/test/test_json/test_pass1.py",start:20612850,end:20614687,audio:0},{filename:"/lib/python3.9/test/test_json/test_pass2.py",start:20614687,end:20615135,audio:0},{filename:"/lib/python3.9/test/test_json/test_pass3.py",start:20615135,end:20615679,audio:0},{filename:"/lib/python3.9/test/test_json/test_recursion.py",start:20615679,end:20618700,audio:0},{filename:"/lib/python3.9/test/test_json/test_scanstring.py",start:20618700,end:20623355,audio:0},{filename:"/lib/python3.9/test/test_json/test_separators.py",start:20623355,end:20624674,audio:0},{filename:"/lib/python3.9/test/test_json/test_speedups.py",start:20624674,end:20627615,audio:0},{filename:"/lib/python3.9/test/test_json/test_tool.py",start:20627615,end:20634708,audio:0},{filename:"/lib/python3.9/test/test_json/test_unicode.py",start:20634708,end:20638840,audio:0},{filename:"/lib/python3.9/test/test_peg_generator/__init__.py",start:20638840,end:20639011,audio:0},{filename:"/lib/python3.9/test/test_peg_generator/__main__.py",start:20639011,end:20639069,audio:0},{filename:"/lib/python3.9/test/test_peg_generator/test_c_parser.py",start:20639069,end:20655220,audio:0},{filename:"/lib/python3.9/test/test_peg_generator/test_first_sets.py",start:20655220,end:20662302,audio:0},{filename:"/lib/python3.9/test/test_peg_generator/test_pegen.py",start:20662302,end:20691687,audio:0},{filename:"/lib/python3.9/test/test_tools/__init__.py",start:20691687,end:20692865,audio:0},{filename:"/lib/python3.9/test/test_tools/__main__.py",start:20692865,end:20692937,audio:0},{filename:"/lib/python3.9/test/test_tools/test_fixcid.py",start:20692937,end:20695895,audio:0},{filename:"/lib/python3.9/test/test_tools/test_gprof2html.py",start:20695895,end:20696814,audio:0},{filename:"/lib/python3.9/test/test_tools/test_i18n.py",start:20696814,end:20705860,audio:0},{filename:"/lib/python3.9/test/test_tools/test_lll.py",start:20705860,end:20707023,audio:0},{filename:"/lib/python3.9/test/test_tools/test_md5sum.py",start:20707023,end:20709723,audio:0},{filename:"/lib/python3.9/test/test_tools/test_pathfix.py",start:20709723,end:20714212,audio:0},{filename:"/lib/python3.9/test/test_tools/test_pdeps.py",start:20714212,end:20715036,audio:0},{filename:"/lib/python3.9/test/test_tools/test_pindent.py",start:20715036,end:20723660,audio:0},{filename:"/lib/python3.9/test/test_tools/test_reindent.py",start:20723660,end:20724666,aud
io:0},{filename:"/lib/python3.9/test/test_tools/test_sundry.py",start:20724666,end:20726540,audio:0},{filename:"/lib/python3.9/test/test_warnings/__init__.py",start:20726540,end:20780097,audio:0},{filename:"/lib/python3.9/test/test_warnings/__main__.py",start:20780097,end:20780150,audio:0},{filename:"/lib/python3.9/test/test_warnings/data/import_warning.py",start:20780150,end:20780239,audio:0},{filename:"/lib/python3.9/test/test_warnings/data/stacklevel.py",start:20780239,end:20780479,audio:0}],remote_package_size:9185150,package_uuid:"4a5bd87e-ece9-4508-a397-03be97639268"})})(); \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Download !EXCLUSIVE! Lanre Teriba Atorise Song.md b/spaces/quidiaMuxgu/Expedit-SAM/Download !EXCLUSIVE! Lanre Teriba Atorise Song.md deleted file mode 100644 index d3979d257c2278dd1af401889bc69017a332bade..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Download !EXCLUSIVE! Lanre Teriba Atorise Song.md +++ /dev/null @@ -1,8 +0,0 @@ -

Download Lanre Teriba Atorise Song


Download: https://geags.com/2uCsIp



            -
-Feb 10, 2022 - Nigerian gospel music minister Lanre Teriba (Atorize) releases new song in September 2021 titled "New Chapter" with Mp3 Download and ... 8a78ff9644
            -
            -
            -

            diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Drmsoft Excel To Exe Converter 7.0 Crack.md b/spaces/quidiaMuxgu/Expedit-SAM/Drmsoft Excel To Exe Converter 7.0 Crack.md deleted file mode 100644 index a8f45bf8bd53ceb6978d780eddd0cce17015ba2d..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Drmsoft Excel To Exe Converter 7.0 Crack.md +++ /dev/null @@ -1,20 +0,0 @@ -

            Drmsoft Excel To Exe Converter 7.0 Crack


            DOWNLOAD –––––>>> https://geags.com/2uCqmO



            - -ia - -MS Excel to EXE Converter - Free to use! It is a simple MS Excel to EXE converter that converts any MS Excel worksheet into a stand-alone EXE file. Professional batch converter which allows to convert entire MS Excel worksheets at once as well as individual parts of MS Excel file with ease. One time registration is required. Free download! 6/11/07 - -AnyDesk is a remote desktop software tool that lets you control your desktop from a web browser on another computer. AnyDesk is free, and there are no installation requirements other than a web browser. A Windows Server or Linux daemon, such as FreeNX or Xnest, is required to be installed on the remote computer. This daemon would allow the client to connect to it and launch any desktops. - -Short for AnyDesk Remote Desktop Software, AnyDesk is a browser-based, cross-platform desktop control tool. You can login to your PC from any other computer, and start your desktop session. Best of all, it's free! - -A remote desktop tool for Windows that lets you access your computer from any web browser. All you need is a browser and a web connection. There are no installation requirements and no setup to worry about. Once you download and install AnyDesk, you can access your desktop from any computer on the web. It's easy. Here are some ways you can use AnyDesk: - Access your desktop from any web browser. - Run your desktop application from AnyDesk. - View your desktop from your tablet. - Review files and folders on your desktop using AnyDesk. - Control your desktop. - Access files and folders remotely. - Connect to a PC running AnyDesk. - Remotely restart a PC. - -Activate MS Office 2010 / 2007 / 2003 from AnyDesk client computer software with a PC running any of these operating systems. - -Compatible with Windows 2000 and later operating systems. - -Features: - Choose to start your MS Office application using a PC or an iPad. - Take control of your PC, including logging out. - Open or save files on your desktop using AnyDesk. - Run any program on your desktop. - Run a command-line program on your PC. - View a listing of any open documents, or one of several built-in sets. - Toggle the desktop between multiple views. - Save the state of your desktop. - Export your desktop to other formats. - Import your desktop from other formats. - Drag files to 4fefd39f24
            -
            -
            -

            diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/demucs/parser.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/demucs/parser.py deleted file mode 100644 index 4e8a19cf976e3c6dfe411da64b8dce3e9a4548e0..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/demucs/parser.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import os -from pathlib import Path - - -def get_parser(): - parser = argparse.ArgumentParser("demucs", description="Train and evaluate Demucs.") - default_raw = None - default_musdb = None - if 'DEMUCS_RAW' in os.environ: - default_raw = Path(os.environ['DEMUCS_RAW']) - if 'DEMUCS_MUSDB' in os.environ: - default_musdb = Path(os.environ['DEMUCS_MUSDB']) - parser.add_argument( - "--raw", - type=Path, - default=default_raw, - help="Path to raw audio, can be faster, see python3 -m demucs.raw to extract.") - parser.add_argument("--no_raw", action="store_const", const=None, dest="raw") - parser.add_argument("-m", - "--musdb", - type=Path, - default=default_musdb, - help="Path to musdb root") - parser.add_argument("--is_wav", action="store_true", - help="Indicate that the MusDB dataset is in wav format (i.e. MusDB-HQ).") - parser.add_argument("--metadata", type=Path, default=Path("metadata/"), - help="Folder where metadata information is stored.") - parser.add_argument("--wav", type=Path, - help="Path to a wav dataset. This should contain a 'train' and a 'valid' " - "subfolder.") - parser.add_argument("--samplerate", type=int, default=44100) - parser.add_argument("--audio_channels", type=int, default=2) - parser.add_argument("--samples", - default=44100 * 10, - type=int, - help="number of samples to feed in") - parser.add_argument("--data_stride", - default=44100, - type=int, - help="Stride for chunks, shorter = longer epochs") - parser.add_argument("-w", "--workers", default=10, type=int, help="Loader workers") - parser.add_argument("--eval_workers", default=2, type=int, help="Final evaluation workers") - parser.add_argument("-d", - "--device", - help="Device to train on, default is cuda if available else cpu") - parser.add_argument("--eval_cpu", action="store_true", help="Eval on test will be run on cpu.") - parser.add_argument("--dummy", help="Dummy parameter, useful to create a new checkpoint file") - parser.add_argument("--test", help="Just run the test pipeline + one validation. " - "This should be a filename relative to the models/ folder.") - parser.add_argument("--test_pretrained", help="Just run the test pipeline + one validation, " - "on a pretrained model. 
") - - parser.add_argument("--rank", default=0, type=int) - parser.add_argument("--world_size", default=1, type=int) - parser.add_argument("--master") - - parser.add_argument("--checkpoints", - type=Path, - default=Path("checkpoints"), - help="Folder where to store checkpoints etc") - parser.add_argument("--evals", - type=Path, - default=Path("evals"), - help="Folder where to store evals and waveforms") - parser.add_argument("--save", - action="store_true", - help="Save estimated for the test set waveforms") - parser.add_argument("--logs", - type=Path, - default=Path("logs"), - help="Folder where to store logs") - parser.add_argument("--models", - type=Path, - default=Path("models"), - help="Folder where to store trained models") - parser.add_argument("-R", - "--restart", - action='store_true', - help='Restart training, ignoring previous run') - - parser.add_argument("--seed", type=int, default=42) - parser.add_argument("-e", "--epochs", type=int, default=180, help="Number of epochs") - parser.add_argument("-r", - "--repeat", - type=int, - default=2, - help="Repeat the train set, longer epochs") - parser.add_argument("-b", "--batch_size", type=int, default=64) - parser.add_argument("--lr", type=float, default=3e-4) - parser.add_argument("--mse", action="store_true", help="Use MSE instead of L1") - parser.add_argument("--init", help="Initialize from a pre-trained model.") - - # Augmentation options - parser.add_argument("--no_augment", - action="store_false", - dest="augment", - default=True, - help="No basic data augmentation.") - parser.add_argument("--repitch", type=float, default=0.2, - help="Probability to do tempo/pitch change") - parser.add_argument("--max_tempo", type=float, default=12, - help="Maximum relative tempo change in %% when using repitch.") - - parser.add_argument("--remix_group_size", - type=int, - default=4, - help="Shuffle sources using group of this size. 
Useful to somewhat " - "replicate multi-gpu training " - "on less GPUs.") - parser.add_argument("--shifts", - type=int, - default=10, - help="Number of random shifts used for the shift trick.") - parser.add_argument("--overlap", - type=float, - default=0.25, - help="Overlap when --split_valid is passed.") - - # See model.py for doc - parser.add_argument("--growth", - type=float, - default=2., - help="Number of channels between two layers will increase by this factor") - parser.add_argument("--depth", - type=int, - default=6, - help="Number of layers for the encoder and decoder") - parser.add_argument("--lstm_layers", type=int, default=2, help="Number of layers for the LSTM") - parser.add_argument("--channels", - type=int, - default=64, - help="Number of channels for the first encoder layer") - parser.add_argument("--kernel_size", - type=int, - default=8, - help="Kernel size for the (transposed) convolutions") - parser.add_argument("--conv_stride", - type=int, - default=4, - help="Stride for the (transposed) convolutions") - parser.add_argument("--context", - type=int, - default=3, - help="Context size for the decoder convolutions " - "before the transposed convolutions") - parser.add_argument("--rescale", - type=float, - default=0.1, - help="Initial weight rescale reference") - parser.add_argument("--no_resample", action="store_false", - default=True, dest="resample", - help="No Resampling of the input/output x2") - parser.add_argument("--no_glu", - action="store_false", - default=True, - dest="glu", - help="Replace all GLUs by ReLUs") - parser.add_argument("--no_rewrite", - action="store_false", - default=True, - dest="rewrite", - help="No 1x1 rewrite convolutions") - parser.add_argument("--normalize", action="store_true") - parser.add_argument("--no_norm_wav", action="store_false", dest='norm_wav', default=True) - - # Tasnet options - parser.add_argument("--tasnet", action="store_true") - parser.add_argument("--split_valid", - action="store_true", - help="Predict chunks by chunks for valid and test. Required for tasnet") - parser.add_argument("--X", type=int, default=8) - - # Other options - parser.add_argument("--show", - action="store_true", - help="Show model architecture, size and exit") - parser.add_argument("--save_model", action="store_true", - help="Skip traning, just save final model " - "for the current checkpoint value.") - parser.add_argument("--save_state", - help="Skip training, just save state " - "for the current checkpoint value. You should " - "provide a model name as argument.") - - # Quantization options - parser.add_argument("--q-min-size", type=float, default=1, - help="Only quantize layers over this size (in MB)") - parser.add_argument( - "--qat", type=int, help="If provided, use QAT training with that many bits.") - - parser.add_argument("--diffq", type=float, default=0) - parser.add_argument( - "--ms-target", type=float, default=162, - help="Model size target in MB, when using DiffQ. Best model will be kept " - "only if it is smaller than this target.") - - return parser - - -def get_name(parser, args): - """ - Return the name of an experiment given the args. Some parameters are ignored, - for instance --workers, as they do not impact the final result. 
- """ - ignore_args = set([ - "checkpoints", - "deterministic", - "eval", - "evals", - "eval_cpu", - "eval_workers", - "logs", - "master", - "rank", - "restart", - "save", - "save_model", - "save_state", - "show", - "workers", - "world_size", - ]) - parts = [] - name_args = dict(args.__dict__) - for name, value in name_args.items(): - if name in ignore_args: - continue - if value != parser.get_default(name): - if isinstance(value, Path): - parts.append(f"{name}={value.name}") - else: - parts.append(f"{name}={value}") - if parts: - name = " ".join(parts) - else: - name = "default" - return name diff --git a/spaces/r3gm/RVC_HF/app.py b/spaces/r3gm/RVC_HF/app.py deleted file mode 100644 index d54981948ff97fca229a7727aaa7823603d6395a..0000000000000000000000000000000000000000 --- a/spaces/r3gm/RVC_HF/app.py +++ /dev/null @@ -1,3154 +0,0 @@ -import os, sys -os.system("pip install pyworld") # ==0.3.3 - -now_dir = os.getcwd() -sys.path.append(now_dir) -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' -os.environ["OPENBLAS_NUM_THREADS"] = "1" -os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" - -# Download models -shell_script = './tools/dlmodels.sh' -os.system(f'chmod +x {shell_script}') -os.system('apt install git-lfs') -os.system('git lfs install') -os.system('apt-get -y install aria2') -os.system('aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d . -o hubert_base.pt') -try: - return_code = os.system(shell_script) - if return_code == 0: - print("Shell script executed successfully.") - else: - print(f"Shell script failed with return code {return_code}") -except Exception as e: - print(f"An error occurred: {e}") - - -import logging -import shutil -import threading -import lib.globals.globals as rvc_globals -from LazyImport import lazyload -import mdx -from mdx_processing_script import get_model_list,id_to_ptm,prepare_mdx,run_mdx -math = lazyload('math') -import traceback -import warnings -tensorlowest = lazyload('tensorlowest') -from random import shuffle -from subprocess import Popen -from time import sleep -import json -import pathlib - -import fairseq -logging.getLogger("faiss").setLevel(logging.WARNING) -import faiss -gr = lazyload("gradio") -np = lazyload("numpy") -torch = lazyload('torch') -re = lazyload('regex') -SF = lazyload("soundfile") -SFWrite = SF.write -from dotenv import load_dotenv -from sklearn.cluster import MiniBatchKMeans -import datetime - - -from glob import glob1 -import signal -from signal import SIGTERM -import librosa - -from configs.config import Config -from i18n import I18nAuto -from infer.lib.train.process_ckpt import ( - change_info, - extract_small_model, - merge, - show_info, -) -#from infer.modules.uvr5.modules import uvr -from infer.modules.vc.modules import VC -from infer.modules.vc.utils import * -from infer.modules.vc.pipeline import Pipeline -import lib.globals.globals as rvc_globals -math = lazyload('math') -ffmpeg = lazyload('ffmpeg') -import nltk -nltk.download('punkt', quiet=True) -from nltk.tokenize import sent_tokenize -from bark import SAMPLE_RATE - -import easy_infer -import audioEffects -from infer.lib.csvutil import CSVutil - -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM -from infer_uvr5 import _audio_pre_, _audio_pre_new -from MDXNet import MDXNetDereverb -from infer.lib.audio import 
load_audio - - -from sklearn.cluster import MiniBatchKMeans - -import time -import csv - -from shlex import quote as SQuote - - - - -RQuote = lambda val: SQuote(str(val)) - -tmp = os.path.join(now_dir, "TEMP") -runtime_dir = os.path.join(now_dir, "runtime/Lib/site-packages") -directories = ['logs', 'audios', 'datasets', 'weights', 'audio-others' , 'audio-outputs'] - -shutil.rmtree(tmp, ignore_errors=True) -shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True) -shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True) - -os.makedirs(tmp, exist_ok=True) -for folder in directories: - os.makedirs(os.path.join(now_dir, folder), exist_ok=True) - - -os.makedirs(tmp, exist_ok=True) -os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True) -os.makedirs(os.path.join(now_dir, "assets/weights"), exist_ok=True) -os.environ["TEMP"] = tmp -warnings.filterwarnings("ignore") -torch.manual_seed(114514) -logging.getLogger("numba").setLevel(logging.WARNING) - -logger = logging.getLogger(__name__) - - -if not os.path.isdir("csvdb/"): - os.makedirs("csvdb") - frmnt, stp = open("csvdb/formanting.csv", "w"), open("csvdb/stop.csv", "w") - frmnt.close() - stp.close() - -global DoFormant, Quefrency, Timbre - -try: - DoFormant, Quefrency, Timbre = CSVutil("csvdb/formanting.csv", "r", "formanting") - DoFormant = ( - lambda DoFormant: True - if DoFormant.lower() == "true" - else (False if DoFormant.lower() == "false" else DoFormant) - )(DoFormant) -except (ValueError, TypeError, IndexError): - DoFormant, Quefrency, Timbre = False, 1.0, 1.0 - CSVutil("csvdb/formanting.csv", "w+", "formanting", DoFormant, Quefrency, Timbre) - -load_dotenv() -config = Config() -vc = VC(config) - -if config.dml == True: - - def forward_dml(ctx, x, scale): - ctx.scale = scale - res = x.clone().detach() - return res - - fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml - -i18n = I18nAuto() -i18n.print() -# 判断是否有能用来训练和加速推理的N卡 -ngpu = torch.cuda.device_count() -gpu_infos = [] -mem = [] -if_gpu_ok = False - -isinterrupted = 0 - - -if torch.cuda.is_available() or ngpu != 0: - for i in range(ngpu): - gpu_name = torch.cuda.get_device_name(i) - if any( - value in gpu_name.upper() - for value in [ - "10", - "16", - "20", - "30", - "40", - "A2", - "A3", - "A4", - "P4", - "A50", - "500", - "A60", - "70", - "80", - "90", - "M4", - "T4", - "TITAN", - ] - ): - # A10#A100#V100#A40#P40#M40#K80#A4500 - if_gpu_ok = True # 至少有一张能用的N卡 - gpu_infos.append("%s\t%s" % (i, gpu_name)) - mem.append( - int( - torch.cuda.get_device_properties(i).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - ) -if if_gpu_ok and len(gpu_infos) > 0: - gpu_info = "\n".join(gpu_infos) - default_batch_size = min(mem) // 2 -else: - gpu_info = "Unfortunately, there is no compatible GPU available to support your training." 
- default_batch_size = 1 -gpus = "-".join([i[0] for i in gpu_infos]) - -class ToolButton(gr.Button, gr.components.FormComponent): - """Small button with single emoji as text, fits inside gradio forms""" - - def __init__(self, **kwargs): - super().__init__(variant="tool", **kwargs) - - def get_block_name(self): - return "button" - - -hubert_model = None -weight_root = os.getenv("weight_root") -weight_uvr5_root = os.getenv("weight_uvr5_root") -index_root = os.getenv("index_root") -datasets_root = "datasets" -fshift_root = "formantshiftcfg" -audio_root = "audios" -audio_others_root = "audio-others" - -sup_audioext = {'wav', 'mp3', 'flac', 'ogg', 'opus', - 'm4a', 'mp4', 'aac', 'alac', 'wma', - 'aiff', 'webm', 'ac3'} - -names = [os.path.join(root, file) - for root, _, files in os.walk(weight_root) - for file in files - if file.endswith((".pth", ".onnx"))] - -indexes_list = [os.path.join(root, name) - for root, _, files in os.walk(index_root, topdown=False) - for name in files - if name.endswith(".index") and "trained" not in name] - -audio_paths = [os.path.join(root, name) - for root, _, files in os.walk(audio_root, topdown=False) - for name in files - if name.endswith(tuple(sup_audioext))] - -audio_others_paths = [os.path.join(root, name) - for root, _, files in os.walk(audio_others_root, topdown=False) - for name in files - if name.endswith(tuple(sup_audioext))] - -uvr5_names = [name.replace(".pth", "") - for name in os.listdir(weight_uvr5_root) - if name.endswith(".pth") or "onnx" in name] - - -check_for_name = lambda: sorted(names)[0] if names else '' - -datasets=[] -for foldername in os.listdir(os.path.join(now_dir, datasets_root)): - if "." not in foldername: - datasets.append(os.path.join(easy_infer.find_folder_parent(".","pretrained"),"datasets",foldername)) - -def get_dataset(): - if len(datasets) > 0: - return sorted(datasets)[0] - else: - return '' - -def update_model_choices(select_value): - model_ids = get_model_list() - model_ids_list = list(model_ids) - if select_value == "VR": - return {"choices": uvr5_names, "__type__": "update"} - elif select_value == "MDX": - return {"choices": model_ids_list, "__type__": "update"} - -set_bark_voice = easy_infer.get_bark_voice() -set_edge_voice = easy_infer.get_edge_voice() - -def update_tts_methods_voice(select_value): - #["Edge-tts", "RVG-tts", "Bark-tts"] - if select_value == "Edge-tts": - return {"choices": set_edge_voice, "value": "", "__type__": "update"} - elif select_value == "Bark-tts": - return {"choices": set_bark_voice, "value": "", "__type__": "update"} - - -def update_dataset_list(name): - new_datasets = [] - for foldername in os.listdir(os.path.join(now_dir, datasets_root)): - if "." 
not in foldername: - new_datasets.append(os.path.join(easy_infer.find_folder_parent(".","pretrained"),"datasets",foldername)) - return gr.Dropdown.update(choices=new_datasets) - -def get_indexes(): - indexes_list = [ - os.path.join(dirpath, filename) - for dirpath, _, filenames in os.walk(index_root) - for filename in filenames - if filename.endswith(".index") and "trained" not in filename - ] - - return indexes_list if indexes_list else '' - -def get_fshift_presets(): - fshift_presets_list = [ - os.path.join(dirpath, filename) - for dirpath, _, filenames in os.walk(fshift_root) - for filename in filenames - if filename.endswith(".txt") - ] - - return fshift_presets_list if fshift_presets_list else '' - -import soundfile as sf - -def generate_output_path(output_folder, base_name, extension): - # Generate a unique name for the output file - index = 1 - while True: - output_path = os.path.join(output_folder, f"{base_name}_{index}.{extension}") - if not os.path.exists(output_path): - return output_path - index += 1 - -def combine_and_save_audios(audio1_path, audio2_path, output_path, volume_factor_audio1, volume_factor_audio2): - audio1, sr1 = librosa.load(audio1_path, sr=None) - audio2, sr2 = librosa.load(audio2_path, sr=None) - - # Align the sample rates - if sr1 != sr2: - if sr1 > sr2: - audio2 = librosa.resample(audio2, orig_sr=sr2, target_sr=sr1) - else: - audio1 = librosa.resample(audio1, orig_sr=sr1, target_sr=sr2) - - # Trim both audios to the same length - target_length = min(len(audio1), len(audio2)) - audio1 = librosa.util.fix_length(audio1, target_length) - audio2 = librosa.util.fix_length(audio2, target_length) - - # Scale each audio's volume by its gain factor - if volume_factor_audio1 != 1.0: - audio1 *= volume_factor_audio1 - if volume_factor_audio2 != 1.0: - audio2 *= volume_factor_audio2 - - # Mix the two audios together - combined_audio = audio1 + audio2 - - sf.write(output_path, combined_audio, sr1) - -# Rest of your code...
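A minimal usage sketch of the two helpers above, assuming they are in scope; "vocals.wav" and "beat.wav" are hypothetical input files, not part of the original app:

    out_path = generate_output_path("audio-outputs", "combined_audio", "wav")  # first free name, e.g. audio-outputs/combined_audio_1.wav
    combine_and_save_audios("vocals.wav", "beat.wav", out_path, 1.0, 0.8)  # mix the two files, second track scaled to 80% gain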
- -# Define the conversion function called by the button -def audio_combined(audio1_path, audio2_path, volume_factor_audio1=1.0, volume_factor_audio2=1.0, reverb_enabled=False, compressor_enabled=False, noise_gate_enabled=False): - output_folder = os.path.join(now_dir, "audio-outputs") - os.makedirs(output_folder, exist_ok=True) - - # Generate unique names for the output files - base_name = "combined_audio" - extension = "wav" - output_path = generate_output_path(output_folder, base_name, extension) - print(reverb_enabled) - print(compressor_enabled) - print(noise_gate_enabled) - - if reverb_enabled or compressor_enabled or noise_gate_enabled: - # Process the first audio with the enabled effects - base_name = "effect_audio" - output_path = generate_output_path(output_folder, base_name, extension) - processed_audio_path = audioEffects.process_audio(audio2_path, output_path, reverb_enabled, compressor_enabled, noise_gate_enabled) - base_name = "combined_audio" - output_path = generate_output_path(output_folder, base_name, extension) - # Combine the processed audio with the other audio using combine_and_save_audios - combine_and_save_audios(audio1_path, processed_audio_path, output_path, volume_factor_audio1, volume_factor_audio2) - - return i18n("Conversion complete!"), output_path - else: - base_name = "combined_audio" - output_path = generate_output_path(output_folder, base_name, extension) - # No effects enabled; combine the unprocessed audios directly - combine_and_save_audios(audio1_path, audio2_path, output_path, volume_factor_audio1, volume_factor_audio2) - - return i18n("Conversion complete!"), output_path - - - - -def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0,architecture): - infos = [] - if architecture == "VR": - try: - inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [inp_root, save_root_vocal, save_root_ins]] - usable_files = [os.path.join(inp_root, file) - for file in os.listdir(inp_root) - if file.endswith(tuple(sup_audioext))] - - - pre_fun = MDXNetDereverb(15) if model_name == "onnx_dereverb_By_FoxJoy" else (_audio_pre_ if "DeEcho" not in model_name else _audio_pre_new)( - agg=int(agg), - model_path=os.path.join(weight_uvr5_root, model_name + ".pth"), - device=config.device, - is_half=config.is_half, - ) - - try: - if paths != None: - paths = [path.name for path in paths] - else: - paths = usable_files - - except: - traceback.print_exc() - paths = usable_files - print(paths) - for path in paths: - inp_path = os.path.join(inp_root, path) - need_reformat, done = 1, 0 - - try: - info = ffmpeg.probe(inp_path, cmd="ffprobe") - if info["streams"][0]["channels"] == 2 and info["streams"][0]["sample_rate"] == "44100": - need_reformat = 0 - pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0) - done = 1 - except: - traceback.print_exc() - - if need_reformat: - tmp_path = f"{tmp}/{os.path.basename(RQuote(inp_path))}.reformatted.wav" - os.system(f"ffmpeg -i {RQuote(inp_path)} -vn -acodec pcm_s16le -ac 2 -ar 44100 {RQuote(tmp_path)} -y") - inp_path = tmp_path - - try: - if not done: - pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0) - infos.append(f"{os.path.basename(inp_path)}->Success") - yield "\n".join(infos) - except: - infos.append(f"{os.path.basename(inp_path)}->{traceback.format_exc()}") - yield "\n".join(infos) - except: - infos.append(traceback.format_exc()) - yield "\n".join(infos) - finally: - try: - if model_name ==
"onnx_dereverb_By_FoxJoy": - del pre_fun.pred.model - del pre_fun.pred.model_ - else: - del pre_fun.model - - del pre_fun - except: traceback.print_exc() - - print("clean_empty_cache") - - if torch.cuda.is_available(): torch.cuda.empty_cache() - - yield "\n".join(infos) - elif architecture == "MDX": - try: - infos.append(i18n("Starting audio conversion... (This might take a moment)")) - yield "\n".join(infos) - inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [inp_root, save_root_vocal, save_root_ins]] - - usable_files = [os.path.join(inp_root, file) - for file in os.listdir(inp_root) - if file.endswith(tuple(sup_audioext))] - try: - if paths != None: - paths = [path.name for path in paths] - else: - paths = usable_files - - except: - traceback.print_exc() - paths = usable_files - print(paths) - invert=True - denoise=True - use_custom_parameter=True - dim_f=3072 - dim_t=256 - n_fft=7680 - use_custom_compensation=True - compensation=1.025 - suffix = "Vocals_custom" #@param ["Vocals", "Drums", "Bass", "Other"]{allow-input: true} - suffix_invert = "Instrumental_custom" #@param ["Instrumental", "Drumless", "Bassless", "Instruments"]{allow-input: true} - print_settings = True # @param{type:"boolean"} - onnx = id_to_ptm(model_name) - compensation = compensation if use_custom_compensation or use_custom_parameter else None - mdx_model = prepare_mdx(onnx,use_custom_parameter, dim_f, dim_t, n_fft, compensation=compensation) - - - for path in paths: - #inp_path = os.path.join(inp_root, path) - suffix_naming = suffix if use_custom_parameter else None - diff_suffix_naming = suffix_invert if use_custom_parameter else None - run_mdx(onnx, mdx_model, path, format0, diff=invert,suffix=suffix_naming,diff_suffix=diff_suffix_naming,denoise=denoise) - - if print_settings: - print() - print('[MDX-Net_Colab settings used]') - print(f'Model used: {onnx}') - print(f'Model MD5: {mdx.MDX.get_hash(onnx)}') - print(f'Model parameters:') - print(f' -dim_f: {mdx_model.dim_f}') - print(f' -dim_t: {mdx_model.dim_t}') - print(f' -n_fft: {mdx_model.n_fft}') - print(f' -compensation: {mdx_model.compensation}') - print() - print('[Input file]') - print('filename(s): ') - for filename in paths: - print(f' -{filename}') - infos.append(f"{os.path.basename(filename)}->Success") - yield "\n".join(infos) - except: - infos.append(traceback.format_exc()) - yield "\n".join(infos) - finally: - try: - del mdx_model - except: traceback.print_exc() - - print("clean_empty_cache") - - if torch.cuda.is_available(): torch.cuda.empty_cache() - - - - - -def change_choices(): - names = [os.path.join(root, file) - for root, _, files in os.walk(weight_root) - for file in files - if file.endswith((".pth", ".onnx"))] - indexes_list = [os.path.join(root, name) for root, _, files in os.walk(index_root, topdown=False) for name in files if name.endswith(".index") and "trained" not in name] - audio_paths = [os.path.join(audio_root, file) for file in os.listdir(os.path.join(now_dir, "audios"))] - - - return ( - {"choices": sorted(names), "__type__": "update"}, - {"choices": sorted(indexes_list), "__type__": "update"}, - {"choices": sorted(audio_paths), "__type__": "update"} - ) -def change_choices2(): - names = [os.path.join(root, file) - for root, _, files in os.walk(weight_root) - for file in files - if file.endswith((".pth", ".onnx"))] - indexes_list = [os.path.join(root, name) for root, _, files in os.walk(index_root, topdown=False) for name in files if name.endswith(".index") and "trained" 
not in name] - - - return ( - {"choices": sorted(names), "__type__": "update"}, - {"choices": sorted(indexes_list), "__type__": "update"}, - ) -def change_choices3(): - - audio_paths = [os.path.join(audio_root, file) for file in os.listdir(os.path.join(now_dir, "audios"))] - audio_others_paths = [os.path.join(audio_others_root, file) for file in os.listdir(os.path.join(now_dir, "audio-others"))] - - - return ( - {"choices": sorted(audio_others_paths), "__type__": "update"}, - {"choices": sorted(audio_paths), "__type__": "update"} - ) - -def clean(): - return {"value": "", "__type__": "update"} -def export_onnx(): - from infer.modules.onnx.export import export_onnx as eo - - eo() - -sr_dict = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -def if_done(done, p): - while 1: - if p.poll() is None: - sleep(0.5) - else: - break - done[0] = True - - -def if_done_multi(done, ps): - while 1: - # poll==None代表进程未结束 - # 只要有一个进程未结束都不停 - flag = 1 - for p in ps: - if p.poll() is None: - flag = 0 - sleep(0.5) - break - if flag == 1: - break - done[0] = True - -def formant_enabled( - cbox, qfrency, tmbre, frmntapply, formantpreset, formant_refresh_button -): - if cbox: - DoFormant = True - CSVutil("csvdb/formanting.csv", "w+", "formanting", DoFormant, qfrency, tmbre) - - # print(f"is checked? - {cbox}\ngot {DoFormant}") - - return ( - {"value": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - ) - - else: - DoFormant = False - CSVutil("csvdb/formanting.csv", "w+", "formanting", DoFormant, qfrency, tmbre) - - # print(f"is checked? - {cbox}\ngot {DoFormant}") - return ( - {"value": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - ) - - -def formant_apply(qfrency, tmbre): - Quefrency = qfrency - Timbre = tmbre - DoFormant = True - CSVutil("csvdb/formanting.csv", "w+", "formanting", DoFormant, qfrency, tmbre) - - return ( - {"value": Quefrency, "__type__": "update"}, - {"value": Timbre, "__type__": "update"}, - ) - -def update_fshift_presets(preset, qfrency, tmbre): - - if preset: - with open(preset, 'r') as p: - content = p.readlines() - qfrency, tmbre = content[0].strip(), content[1] - - formant_apply(qfrency, tmbre) - else: - qfrency, tmbre = preset_apply(preset, qfrency, tmbre) - - return ( - {"choices": get_fshift_presets(), "__type__": "update"}, - {"value": qfrency, "__type__": "update"}, - {"value": tmbre, "__type__": "update"}, - ) - -def preprocess_dataset(trainset_dir, exp_dir, sr, n_p): - sr = sr_dict[sr] - os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True) - f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w") - f.close() - per = 3.0 if config.is_half else 3.7 - cmd = '"%s" infer/modules/train/preprocess.py "%s" %s %s "%s/logs/%s" %s %.1f' % ( - config.python_cmd, - trainset_dir, - sr, - n_p, - now_dir, - exp_dir, - config.noparallel, - per, - ) - logger.info(cmd) - p = Popen(cmd, shell=True) # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done, - args=( - done, - p, - ), - ).start() - while 1: - with 
open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f: - yield (f.read()) - sleep(1) - if done[0]: - break - with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f: - log = f.read() - logger.info(log) - yield log - - -def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl, gpus_rmvpe): - gpus = gpus.split("-") - os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True) - f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w") - f.close() - if if_f0: - if f0method != "rmvpe_gpu": - cmd = ( - '"%s" infer/modules/train/extract/extract_f0_print.py "%s/logs/%s" %s %s' - % ( - config.python_cmd, - now_dir, - exp_dir, - n_p, - f0method, - echl, - ) - ) - logger.info(cmd) - p = Popen( - cmd, shell=True, cwd=now_dir - ) # , stdin=PIPE, stdout=PIPE,stderr=PIPE - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done, - args=( - done, - p, - ), - ).start() - else: - if gpus_rmvpe != "-": - gpus_rmvpe = gpus_rmvpe.split("-") - leng = len(gpus_rmvpe) - ps = [] - for idx, n_g in enumerate(gpus_rmvpe): - cmd = ( - '"%s" infer/modules/train/extract/extract_f0_rmvpe.py %s %s %s "%s/logs/%s" %s ' - % ( - config.python_cmd, - leng, - idx, - n_g, - now_dir, - exp_dir, - config.is_half, - ) - ) - logger.info(cmd) - p = Popen( - cmd, shell=True, cwd=now_dir - ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir - ps.append(p) - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done_multi, # - args=( - done, - ps, - ), - ).start() - else: - cmd = ( - config.python_cmd - + ' infer/modules/train/extract/extract_f0_rmvpe_dml.py "%s/logs/%s" ' - % ( - now_dir, - exp_dir, - ) - ) - logger.info(cmd) - p = Popen( - cmd, shell=True, cwd=now_dir - ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir - p.wait() - done = [True] - while 1: - with open( - "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r" - ) as f: - yield (f.read()) - sleep(1) - if done[0]: - break - with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f: - log = f.read() - logger.info(log) - yield log - ####对不同part分别开多进程 - """ - n_part=int(sys.argv[1]) - i_part=int(sys.argv[2]) - i_gpu=sys.argv[3] - exp_dir=sys.argv[4] - os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu) - """ - leng = len(gpus) - ps = [] - for idx, n_g in enumerate(gpus): - cmd = ( - '"%s" infer/modules/train/extract_feature_print.py %s %s %s %s "%s/logs/%s" %s' - % ( - config.python_cmd, - config.device, - leng, - idx, - n_g, - now_dir, - exp_dir, - version19, - ) - ) - logger.info(cmd) - p = Popen( - cmd, shell=True, cwd=now_dir - ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir - ps.append(p) - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done_multi, - args=( - done, - ps, - ), - ).start() - while 1: - with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f: - yield (f.read()) - sleep(1) - if done[0]: - break - with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f: - log = f.read() - logger.info(log) - yield log - -def get_pretrained_models(path_str, f0_str, sr2): - if_pretrained_generator_exist = os.access( - "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK - ) - if_pretrained_discriminator_exist = os.access( - "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK - ) - if not 
if_pretrained_generator_exist: - logger.warn( - "assets/pretrained%s/%sG%s.pth not exist, will not use pretrained model", - path_str, - f0_str, - sr2, - ) - if not if_pretrained_discriminator_exist: - logger.warn( - "assets/pretrained%s/%sD%s.pth not exist, will not use pretrained model", - path_str, - f0_str, - sr2, - ) - return ( - "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2) - if if_pretrained_generator_exist - else "", - "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2) - if if_pretrained_discriminator_exist - else "", - ) - -def change_sr2(sr2, if_f0_3, version19): - path_str = "" if version19 == "v1" else "_v2" - f0_str = "f0" if if_f0_3 else "" - return get_pretrained_models(path_str, f0_str, sr2) - - -def change_version19(sr2, if_f0_3, version19): - path_str = "" if version19 == "v1" else "_v2" - if sr2 == "32k" and version19 == "v1": - sr2 = "40k" - to_return_sr2 = ( - {"choices": ["40k", "48k"], "__type__": "update", "value": sr2} - if version19 == "v1" - else {"choices": ["40k", "48k", "32k"], "__type__": "update", "value": sr2} - ) - f0_str = "f0" if if_f0_3 else "" - return ( - *get_pretrained_models(path_str, f0_str, sr2), - to_return_sr2, - ) - - -def change_f0(if_f0_3, sr2, version19): # f0method8,pretrained_G14,pretrained_D15 - path_str = "" if version19 == "v1" else "_v2" - return ( - {"visible": if_f0_3, "__type__": "update"}, - *get_pretrained_models(path_str, "f0", sr2), - ) - - -global log_interval - -def set_log_interval(exp_dir, batch_size12): - log_interval = 1 - folder_path = os.path.join(exp_dir, "1_16k_wavs") - - if os.path.isdir(folder_path): - wav_files_num = len(glob1(folder_path,"*.wav")) - - if wav_files_num > 0: - log_interval = math.ceil(wav_files_num / batch_size12) - if log_interval > 1: - log_interval += 1 - - return log_interval - -global PID, PROCESS - -def click_train( - exp_dir1, - sr2, - if_f0_3, - spk_id5, - save_epoch10, - total_epoch11, - batch_size12, - if_save_latest13, - pretrained_G14, - pretrained_D15, - gpus16, - if_cache_gpu17, - if_save_every_weights18, - version19, -): - CSVutil("csvdb/stop.csv", "w+", "formanting", False) - # 生成filelist - exp_dir = "%s/logs/%s" % (now_dir, exp_dir1) - os.makedirs(exp_dir, exist_ok=True) - gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir) - feature_dir = ( - "%s/3_feature256" % (exp_dir) - if version19 == "v1" - else "%s/3_feature768" % (exp_dir) - ) - if if_f0_3: - f0_dir = "%s/2a_f0" % (exp_dir) - f0nsf_dir = "%s/2b-f0nsf" % (exp_dir) - names = ( - set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) - & set([name.split(".")[0] for name in os.listdir(feature_dir)]) - & set([name.split(".")[0] for name in os.listdir(f0_dir)]) - & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)]) - ) - else: - names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set( - [name.split(".")[0] for name in os.listdir(feature_dir)] - ) - opt = [] - for name in names: - if if_f0_3: - opt.append( - "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s" - % ( - gt_wavs_dir.replace("\\", "\\\\"), - name, - feature_dir.replace("\\", "\\\\"), - name, - f0_dir.replace("\\", "\\\\"), - name, - f0nsf_dir.replace("\\", "\\\\"), - name, - spk_id5, - ) - ) - else: - opt.append( - "%s/%s.wav|%s/%s.npy|%s" - % ( - gt_wavs_dir.replace("\\", "\\\\"), - name, - feature_dir.replace("\\", "\\\\"), - name, - spk_id5, - ) - ) - fea_dim = 256 if version19 == "v1" else 768 - if if_f0_3: - for _ in range(2): - opt.append( - 
"%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s" - % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5) - ) - else: - for _ in range(2): - opt.append( - "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s" - % (now_dir, sr2, now_dir, fea_dim, spk_id5) - ) - shuffle(opt) - with open("%s/filelist.txt" % exp_dir, "w") as f: - f.write("\n".join(opt)) - logger.debug("Write filelist done") - # 生成config#无需生成config - # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0" - logger.info("Use gpus: %s", str(gpus16)) - if pretrained_G14 == "": - logger.info("No pretrained Generator") - if pretrained_D15 == "": - logger.info("No pretrained Discriminator") - if version19 == "v1" or sr2 == "40k": - config_path = "v1/%s.json" % sr2 - else: - config_path = "v2/%s.json" % sr2 - config_save_path = os.path.join(exp_dir, "config.json") - if not pathlib.Path(config_save_path).exists(): - with open(config_save_path, "w", encoding="utf-8") as f: - json.dump( - config.json_config[config_path], - f, - ensure_ascii=False, - indent=4, - sort_keys=True, - ) - f.write("\n") - if gpus16: - cmd = ( - '"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s' - % ( - config.python_cmd, - exp_dir1, - sr2, - 1 if if_f0_3 else 0, - batch_size12, - gpus16, - total_epoch11, - save_epoch10, - "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "", - "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "", - 1 if if_save_latest13 == True else 0, - 1 if if_cache_gpu17 == True else 0, - 1 if if_save_every_weights18 == True else 0, - version19, - ) - ) - else: - cmd = ( - '"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s' - % ( - config.python_cmd, - exp_dir1, - sr2, - 1 if if_f0_3 else 0, - batch_size12, - total_epoch11, - save_epoch10, - "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "", - "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "", - 1 if if_save_latest13 == True else 0, - 1 if if_cache_gpu17 == True else 0, - 1 if if_save_every_weights18 == True else 0, - version19, - ) - ) - logger.info(cmd) - global p - p = Popen(cmd, shell=True, cwd=now_dir) - global PID - PID = p.pid - - p.wait() - - return i18n("Training is done, check train.log"), {"visible": False, "__type__": "update"}, {"visible": True, "__type__": "update"} - - -def train_index(exp_dir1, version19): - # exp_dir = "%s/logs/%s" % (now_dir, exp_dir1) - exp_dir = "logs/%s" % (exp_dir1) - os.makedirs(exp_dir, exist_ok=True) - feature_dir = ( - "%s/3_feature256" % (exp_dir) - if version19 == "v1" - else "%s/3_feature768" % (exp_dir) - ) - if not os.path.exists(feature_dir): - return "请先进行特征提取!" - listdir_res = list(os.listdir(feature_dir)) - if len(listdir_res) == 0: - return "请先进行特征提取!" - infos = [] - npys = [] - for name in sorted(listdir_res): - phone = np.load("%s/%s" % (feature_dir, name)) - npys.append(phone) - big_npy = np.concatenate(npys, 0) - big_npy_idx = np.arange(big_npy.shape[0]) - np.random.shuffle(big_npy_idx) - big_npy = big_npy[big_npy_idx] - if big_npy.shape[0] > 2e5: - infos.append("Trying doing kmeans %s shape to 10k centers." 
% big_npy.shape[0]) - yield "\n".join(infos) - try: - big_npy = ( - MiniBatchKMeans( - n_clusters=10000, - verbose=True, - batch_size=256 * config.n_cpu, - compute_labels=False, - init="random", - ) - .fit(big_npy) - .cluster_centers_ - ) - except: - info = traceback.format_exc() - logger.info(info) - infos.append(info) - yield "\n".join(infos) - - np.save("%s/total_fea.npy" % exp_dir, big_npy) - n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39) - infos.append("%s,%s" % (big_npy.shape, n_ivf)) - yield "\n".join(infos) - index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf) - # index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf) - infos.append("training") - yield "\n".join(infos) - index_ivf = faiss.extract_index_ivf(index) # - index_ivf.nprobe = 1 - index.train(big_npy) - faiss.write_index( - index, - "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19), - ) - - infos.append("adding") - yield "\n".join(infos) - batch_size_add = 8192 - for i in range(0, big_npy.shape[0], batch_size_add): - index.add(big_npy[i : i + batch_size_add]) - faiss.write_index( - index, - "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19), - ) - infos.append( - "Successful Index Construction,added_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (n_ivf, index_ivf.nprobe, exp_dir1, version19) - ) - # faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19)) - # infos.append("成功构建索引,added_IVF%s_Flat_FastScan_%s.index"%(n_ivf,version19)) - yield "\n".join(infos) - -def change_info_(ckpt_path): - if not os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log")): - return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"} - try: - with open( - ckpt_path.replace(os.path.basename(ckpt_path), "train.log"), "r" - ) as f: - info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1]) - sr, f0 = info["sample_rate"], info["if_f0"] - version = "v2" if ("version" in info and info["version"] == "v2") else "v1" - return sr, str(f0), version - except: - traceback.print_exc() - return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"} - -F0GPUVisible = config.dml == False - - -def change_f0_method(f0method8): - if f0method8 == "rmvpe_gpu": - visible = F0GPUVisible - else: - visible = False - return {"visible": visible, "__type__": "update"} - - - -def export_onnx(model_path, exported_path): - device = torch.device("cpu") - checkpoint = torch.load(model_path, map_location=device) - vec_channels = 256 if checkpoint.get("version", "v1") == "v1" else 768 - - test_inputs = { - "phone": torch.rand(1, 200, vec_channels), - "phone_lengths": torch.LongTensor([200]), - "pitch": torch.randint(5, 255, (1, 200)), - "pitchf": torch.rand(1, 200), - "ds": torch.zeros(1).long(), - "rnd": torch.rand(1, 192, 200) - } - - checkpoint["config"][-3] = checkpoint["weight"]["emb_g.weight"].shape[0] - net_g = SynthesizerTrnMsNSFsidM(*checkpoint["config"], is_half=False, version=checkpoint.get("version", "v1")) - - net_g.load_state_dict(checkpoint["weight"], strict=False) - net_g = net_g.to(device) - - dynamic_axes = {"phone": [1], "pitch": [1], "pitchf": [1], "rnd": [2]} - - torch.onnx.export( - net_g, - tuple(value.to(device) for value in test_inputs.values()), - exported_path, - dynamic_axes=dynamic_axes, - do_constant_folding=False, - opset_version=13, - verbose=False, - 
input_names=list(test_inputs.keys()), - output_names=["audio"], - ) - return "Finished" - - - -import re as regex -import scipy.io.wavfile as wavfile - -cli_current_page = "HOME" - - -def cli_split_command(com): - exp = r'(?:(?<=\s)|^)"(.*?)"(?=\s|$)|(\S+)' - split_array = regex.findall(exp, com) - split_array = [group[0] if group[0] else group[1] for group in split_array] - return split_array - - -def execute_generator_function(genObject): - for _ in genObject: - pass - - -def cli_infer(com): - # get VC first - com = cli_split_command(com) - model_name = com[0] - source_audio_path = com[1] - output_file_name = com[2] - feature_index_path = com[3] - f0_file = None # Not Implemented Yet - - # Get parameters for inference - speaker_id = int(com[4]) - transposition = float(com[5]) - f0_method = com[6] - crepe_hop_length = int(com[7]) - harvest_median_filter = int(com[8]) - resample = int(com[9]) - mix = float(com[10]) - feature_ratio = float(com[11]) - protection_amnt = float(com[12]) - protect1 = 0.5 - - if com[14] == "False" or com[14] == "false": - DoFormant = False - Quefrency = 0.0 - Timbre = 0.0 - CSVutil( - "csvdb/formanting.csv", "w+", "formanting", DoFormant, Quefrency, Timbre - ) - - else: - DoFormant = True - Quefrency = float(com[15]) - Timbre = float(com[16]) - CSVutil( - "csvdb/formanting.csv", "w+", "formanting", DoFormant, Quefrency, Timbre - ) - - print("Mangio-RVC-Fork Infer-CLI: Starting the inference...") - vc_data = vc.get_vc(model_name, protection_amnt, protect1) - print(vc_data) - print("Mangio-RVC-Fork Infer-CLI: Performing inference...") - conversion_data = vc.vc_single( - speaker_id, - source_audio_path, - source_audio_path, - transposition, - f0_file, - f0_method, - feature_index_path, - feature_index_path, - feature_ratio, - harvest_median_filter, - resample, - mix, - protection_amnt, - crepe_hop_length, - ) - if "Success." in conversion_data[0]: - print( - "Mangio-RVC-Fork Infer-CLI: Inference succeeded. Writing to %s/%s..." - % ("audio-outputs", output_file_name) - ) - wavfile.write( - "%s/%s" % ("audio-outputs", output_file_name), - conversion_data[1][0], - conversion_data[1][1], - ) - print( - "Mangio-RVC-Fork Infer-CLI: Finished! Saved output to %s/%s" - % ("audio-outputs", output_file_name) - ) - else: - print("Mangio-RVC-Fork Infer-CLI: Inference failed. 
Here's the traceback: ") - print(conversion_data[0]) - - -def cli_pre_process(com): - com = cli_split_command(com) - model_name = com[0] - trainset_directory = com[1] - sample_rate = com[2] - num_processes = int(com[3]) - - print("Mangio-RVC-Fork Pre-process: Starting...") - generator = preprocess_dataset( - trainset_directory, model_name, sample_rate, num_processes - ) - execute_generator_function(generator) - print("Mangio-RVC-Fork Pre-process: Finished") - - -def cli_extract_feature(com): - com = cli_split_command(com) - model_name = com[0] - gpus = com[1] - num_processes = int(com[2]) - has_pitch_guidance = True if (int(com[3]) == 1) else False - f0_method = com[4] - crepe_hop_length = int(com[5]) - version = com[6] # v1 or v2 - - print("Mangio-RVC-CLI: Extract Feature Has Pitch: " + str(has_pitch_guidance)) - print("Mangio-RVC-CLI: Extract Feature Version: " + str(version)) - print("Mangio-RVC-Fork Feature Extraction: Starting...") - generator = extract_f0_feature( - gpus, - num_processes, - f0_method, - has_pitch_guidance, - model_name, - version, - crepe_hop_length, - ) - execute_generator_function(generator) - print("Mangio-RVC-Fork Feature Extraction: Finished") - - -def cli_train(com): - com = cli_split_command(com) - model_name = com[0] - sample_rate = com[1] - has_pitch_guidance = True if (int(com[2]) == 1) else False - speaker_id = int(com[3]) - save_epoch_iteration = int(com[4]) - total_epoch = int(com[5]) # 10000 - batch_size = int(com[6]) - gpu_card_slot_numbers = com[7] - if_save_latest = True if (int(com[8]) == 1) else False - if_cache_gpu = True if (int(com[9]) == 1) else False - if_save_every_weight = True if (int(com[10]) == 1) else False - version = com[11] - - pretrained_base = "pretrained/" if version == "v1" else "pretrained_v2/" - - g_pretrained_path = "%sf0G%s.pth" % (pretrained_base, sample_rate) - d_pretrained_path = "%sf0D%s.pth" % (pretrained_base, sample_rate) - - print("Mangio-RVC-Fork Train-CLI: Training...") - click_train( - model_name, - sample_rate, - has_pitch_guidance, - speaker_id, - save_epoch_iteration, - total_epoch, - batch_size, - if_save_latest, - g_pretrained_path, - d_pretrained_path, - gpu_card_slot_numbers, - if_cache_gpu, - if_save_every_weight, - version, - ) - - -def cli_train_feature(com): - com = cli_split_command(com) - model_name = com[0] - version = com[1] - print("Mangio-RVC-Fork Train Feature Index-CLI: Training... 
Please wait") - generator = train_index(model_name, version) - execute_generator_function(generator) - print("Mangio-RVC-Fork Train Feature Index-CLI: Done!") - - -def cli_extract_model(com): - com = cli_split_command(com) - model_path = com[0] - save_name = com[1] - sample_rate = com[2] - has_pitch_guidance = com[3] - info = com[4] - version = com[5] - extract_small_model_process = extract_small_model( - model_path, save_name, sample_rate, has_pitch_guidance, info, version - ) - if extract_small_model_process == "Success.": - print("Mangio-RVC-Fork Extract Small Model: Success!") - else: - print(str(extract_small_model_process)) - print("Mangio-RVC-Fork Extract Small Model: Failed!") - - -def preset_apply(preset, qfer, tmbr): - if str(preset) != "": - with open(str(preset), "r") as p: - content = p.readlines() - qfer, tmbr = content[0].split("\n")[0], content[1] - formant_apply(qfer, tmbr) - else: - pass - return ( - {"value": qfer, "__type__": "update"}, - {"value": tmbr, "__type__": "update"}, - ) - - -def print_page_details(): - if cli_current_page == "HOME": - print( - "\n go home : Takes you back to home with a navigation list." - "\n go infer : Takes you to inference command execution." - "\n go pre-process : Takes you to training step.1) pre-process command execution." - "\n go extract-feature : Takes you to training step.2) extract-feature command execution." - "\n go train : Takes you to training step.3) being or continue training command execution." - "\n go train-feature : Takes you to the train feature index command execution." - "\n go extract-model : Takes you to the extract small model command execution." - ) - elif cli_current_page == "INFER": - print( - "\n arg 1) model name with .pth in ./weights: mi-test.pth" - "\n arg 2) source audio path: myFolder\\MySource.wav" - "\n arg 3) output file name to be placed in './audio-outputs': MyTest.wav" - "\n arg 4) feature index file path: logs/mi-test/added_IVF3042_Flat_nprobe_1.index" - "\n arg 5) speaker id: 0" - "\n arg 6) transposition: 0" - "\n arg 7) f0 method: harvest (pm, harvest, crepe, crepe-tiny, hybrid[x,x,x,x], mangio-crepe, mangio-crepe-tiny, rmvpe)" - "\n arg 8) crepe hop length: 160" - "\n arg 9) harvest median filter radius: 3 (0-7)" - "\n arg 10) post resample rate: 0" - "\n arg 11) mix volume envelope: 1" - "\n arg 12) feature index ratio: 0.78 (0-1)" - "\n arg 13) Voiceless Consonant Protection (Less Artifact): 0.33 (Smaller number = more protection. 
0.50 means Don't Use.)" - "\n arg 14) Whether to formant shift the inference audio before conversion: False (if set to false, you can ignore setting the quefrency and timbre values for formanting)" - "\n arg 15)* Quefrency for formanting: 8.0 (no need to set if arg14 is False/false)" - "\n arg 16)* Timbre for formanting: 1.2 (no need to set if arg14 is False/false) \n" - "\nExample: mi-test.pth saudio/Sidney.wav myTest.wav logs/mi-test/added_index.index 0 -2 harvest 160 3 0 1 0.95 0.33 0.45 True 8.0 1.2" - ) - elif cli_current_page == "PRE-PROCESS": - print( - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Trainset directory: mydataset (or) E:\\my-data-set" - "\n arg 3) Sample rate: 40k (32k, 40k, 48k)" - "\n arg 4) Number of CPU threads to use: 8 \n" - "\nExample: mi-test mydataset 40k 24" - ) - elif cli_current_page == "EXTRACT-FEATURE": - print( - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) GPU card slot: 0 (0-1-2 if using 3 GPUs)" - "\n arg 3) Number of CPU threads to use: 8" - "\n arg 4) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" - "\n arg 5) f0 Method: harvest (pm, harvest, dio, crepe)" - "\n arg 6) Crepe hop length: 128" - "\n arg 7) Version for pre-trained models: v2 (use either v1 or v2)\n" - "\nExample: mi-test 0 24 1 harvest 128 v2" - ) - elif cli_current_page == "TRAIN": - print( - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Sample rate: 40k (32k, 40k, 48k)" - "\n arg 3) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" - "\n arg 4) speaker id: 0" - "\n arg 5) Save epoch iteration: 50" - "\n arg 6) Total epochs: 10000" - "\n arg 7) Batch size: 8" - "\n arg 8) GPU card slot: 0 (0-1-2 if using 3 GPUs)" - "\n arg 9) Save only the latest checkpoint: 0 (0 for no, 1 for yes)" - "\n arg 10) Whether to cache training set to vram: 0 (0 for no, 1 for yes)" - "\n arg 11) Save extracted small model every generation?: 0 (0 for no, 1 for yes)" - "\n arg 12) Model architecture version: v2 (use either v1 or v2)\n" - "\nExample: mi-test 40k 1 0 50 10000 8 0 0 0 0 v2" - ) - elif cli_current_page == "TRAIN-FEATURE": - print( - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Model architecture version: v2 (use either v1 or v2)\n" - "\nExample: mi-test v2" - ) - elif cli_current_page == "EXTRACT-MODEL": - print( - "\n arg 1) Model Path: logs/mi-test/G_168000.pth" - "\n arg 2) Model save name: MyModel" - "\n arg 3) Sample rate: 40k (32k, 40k, 48k)" - "\n arg 4) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" - '\n arg 5) Model information: "My Model"' - "\n arg 6) Model architecture version: v2 (use either v1 or v2)\n" - '\nExample: logs/mi-test/G_168000.pth MyModel 40k 1 "Created by Cole Mangio" v2' - ) - -def change_page(page): - global cli_current_page - cli_current_page = page - return 0 - -def execute_command(com): - if com == "go home": - return change_page("HOME") - elif com == "go infer": - return change_page("INFER") - elif com == "go pre-process": - return change_page("PRE-PROCESS") - elif com == "go extract-feature": - return change_page("EXTRACT-FEATURE") - elif com == "go train": - return change_page("TRAIN") - elif com == "go train-feature": - return change_page("TRAIN-FEATURE") - elif com == "go extract-model": - return change_page("EXTRACT-MODEL") - else: - if com[:3] == "go ": - print("page '%s' does not exist!"
% com[3:]) - return 0 - - if cli_current_page == "INFER": - cli_infer(com) - elif cli_current_page == "PRE-PROCESS": - cli_pre_process(com) - elif cli_current_page == "EXTRACT-FEATURE": - cli_extract_feature(com) - elif cli_current_page == "TRAIN": - cli_train(com) - elif cli_current_page == "TRAIN-FEATURE": - cli_train_feature(com) - elif cli_current_page == "EXTRACT-MODEL": - cli_extract_model(com) - -def cli_navigation_loop(): - while True: - print("\nYou are currently in '%s':" % cli_current_page) - print_page_details() - command = input("%s: " % cli_current_page) - try: - execute_command(command) - except: - print(traceback.format_exc()) - - -if config.is_cli: - print("\n\nMangio-RVC-Fork v2 CLI App!\n") - print( - "Welcome to the CLI version of RVC. Please read the documentation on https://github.com/Mangio621/Mangio-RVC-Fork (README.MD) to understand how to use this app.\n" - ) - cli_navigation_loop() - - - - - -def switch_pitch_controls(f0method0): - is_visible = f0method0 != 'rmvpe' - - if rvc_globals.NotesOrHertz: - return ( - {"visible": False, "__type__": "update"}, - {"visible": is_visible, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": is_visible, "__type__": "update"} - ) - else: - return ( - {"visible": is_visible, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": is_visible, "__type__": "update"}, - {"visible": False, "__type__": "update"} - ) - -def match_index(sid0): - picked = False - # folder = sid0.split('.')[0] - - # folder = re.split(r'. |_', sid0)[0] - folder = sid0.split(".")[0].split("_")[0] - # folder_test = sid0.split('.')[0].split('_')[0].split('-')[0] - parent_dir = "./logs/" + folder - # print(parent_dir) - if os.path.exists(parent_dir): - # print('path exists') - for filename in os.listdir(parent_dir.replace("\\", "/")): - if filename.endswith(".index"): - for i in range(len(indexes_list)): - if indexes_list[i] == ( - os.path.join(("./logs/" + folder), filename).replace("\\", "/") - ): - # print('regular index found') - break - else: - if indexes_list[i] == ( - os.path.join( - ("./logs/" + folder.lower()), filename - ).replace("\\", "/") - ): - # print('lowered index found') - parent_dir = "./logs/" + folder.lower() - break - # elif (indexes_list[i]).casefold() == ((os.path.join(("./logs/" + folder), filename).replace('\\','/')).casefold()): - # print('8') - # parent_dir = "./logs/" + folder.casefold() - # break - # elif (indexes_list[i]) == ((os.path.join(("./logs/" + folder_test), filename).replace('\\','/'))): - # parent_dir = "./logs/" + folder_test - # print(parent_dir) - # break - # elif (indexes_list[i]) == (os.path.join(("./logs/" + folder_test.lower()), filename).replace('\\','/')): - # parent_dir = "./logs/" + folder_test - # print(parent_dir) - # break - # else: - # #print('couldnt find index') - # continue - - # print('all done') - index_path = os.path.join( - parent_dir.replace("\\", "/"), filename.replace("\\", "/") - ).replace("\\", "/") - # print(index_path) - return (index_path, index_path) - - else: - # print('nothing found') - return ("", "") - -def stoptraining(mim): - if int(mim) == 1: - CSVutil("csvdb/stop.csv", "w+", "stop", "True") - # p.terminate() - # p.kill() - try: - os.kill(PID, signal.SIGTERM) - except Exception as e: - print(f"Couldn't click due to {e}") - pass - else: - pass - - return ( - {"visible": False, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - ) - -weights_dir = 'weights/' - -def note_to_hz(note_name): - SEMITONES = {'C': -9, 
'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4, 'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2} - pitch_class, octave = note_name[:-1], int(note_name[-1]) - semitone = SEMITONES[pitch_class] - note_number = 12 * (octave - 4) + semitone - frequency = 440.0 * (2.0 ** (1.0/12)) ** note_number - return frequency - -def save_to_wav(record_button): - if record_button is None: - pass - else: - path_to_file=record_button - new_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'.wav' - new_path='./audios/'+new_name - shutil.move(path_to_file,new_path) - return new_name -def save_to_wav2_edited(dropbox): - if dropbox is None: - pass - else: - file_path = dropbox.name - target_path = os.path.join('audios', os.path.basename(file_path)) - - if os.path.exists(target_path): - os.remove(target_path) - print('Replacing old dropdown file...') - - shutil.move(file_path, target_path) - return -def save_to_wav2(dropbox): - file_path = dropbox.name - target_path = os.path.join('audios', os.path.basename(file_path)) - - if os.path.exists(target_path): - os.remove(target_path) - print('Replacing old dropdown file...') - - shutil.move(file_path, target_path) - return target_path - -from gtts import gTTS -import edge_tts -import asyncio - - - - -def custom_voice( - _values, # filter indices - audio_files, # all audio files - model_voice_path='', - transpose=0, - f0method='pm', - index_rate_=float(0.66), - crepe_hop_length_=float(64), - f0_autotune=False, - file_index='', - file_index2='', - ): - - vc.get_vc(model_voice_path) - - - for _value_item in _values: - filename = "audio2/"+audio_files[_value_item] if _value_item != "converted_tts" else audio_files[0] - #filename = "audio2/"+audio_files[_value_item] - try: - print(audio_files[_value_item], model_voice_path) - except: - pass - info_, (sample_, audio_output_) = vc.vc_single_dont_save( - sid=0, - input_audio_path0=filename, #f"audio2/{filename}", - input_audio_path1=filename, #f"audio2/{filename}", - f0_up_key=transpose, # transpose for m to f and reverse 0 12 - f0_file=None, - f0_method= f0method, - file_index= file_index, # dir pwd? 
- file_index2= file_index2, - # file_big_npy1, - index_rate= index_rate_, - filter_radius= int(3), - resample_sr= int(0), - rms_mix_rate= float(0.25), - protect= float(0.33), - crepe_hop_length= crepe_hop_length_, - f0_autotune=f0_autotune, - f0_min=50, - note_min=50, - f0_max=1100, - note_max=1100 - ) - - sf.write( - file= filename, #f"audio2/{filename}", - samplerate=sample_, - data=audio_output_ - ) -def cast_to_device(tensor, device): - try: - return tensor.to(device) - except Exception as e: - print(e) - return tensor - - -def __bark__(text, voice_preset): - os.makedirs(os.path.join(now_dir,"tts"), exist_ok=True) - from transformers import AutoProcessor, BarkModel - device = "cuda:0" if torch.cuda.is_available() else "cpu" - dtype = torch.float32 if "cpu" in device else torch.float16 - bark_processor = AutoProcessor.from_pretrained( - "suno/bark", - cache_dir=os.path.join(now_dir,"tts","suno/bark"), - torch_dtype=dtype) - bark_model = BarkModel.from_pretrained( - "suno/bark", - cache_dir=os.path.join(now_dir,"tts","suno/bark"), - torch_dtype=dtype).to(device) - # bark_model.enable_cpu_offload() - inputs = bark_processor( - text=[text], - return_tensors="pt", - voice_preset=voice_preset - ) - tensor_dict = {k: cast_to_device(v,device) if hasattr(v,"to") else v for k, v in inputs.items()} - speech_values = bark_model.generate(**tensor_dict, do_sample=True) - sampling_rate = bark_model.generation_config.sample_rate - speech = speech_values.cpu().numpy().squeeze() - return speech, sampling_rate - - - -def make_test( - tts_text, - tts_voice, - model_path, - index_path, - transpose, - f0_method, - index_rate, - crepe_hop_length, - f0_autotune, - tts_method - ): - - if tts_voice == None: - return - - filename = os.path.join(now_dir, "audio-outputs", "converted_tts.wav") - if "SET_LIMIT" == os.getenv("DEMO"): - if len(tts_text) > 60: - tts_text = tts_text[:60] - print("DEMO; limit to 60 characters") - - language = tts_voice[:2] - if tts_method == "Edge-tts": - try: - #nest_asyncio.apply() # gradio;not - asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save(filename)) - except: - try: - tts = gTTS(tts_text, lang=language) - tts.save(filename) - tts.save - print(f'No audio was received. Please change the tts voice for {tts_voice}. 
USING gTTS.') - except: - tts = gTTS('a', lang=language) - tts.save(filename) - print('Error: Audio will be replaced.') - - os.system("cp audio-outputs/converted_tts.wav audio-outputs/real_tts.wav") - - custom_voice( - ["converted_tts"], # filter indices - ["audio-outputs/converted_tts.wav"], # all audio files - model_voice_path=model_path, - transpose=transpose, - f0method=f0_method, - index_rate_=index_rate, - crepe_hop_length_=crepe_hop_length, - f0_autotune=f0_autotune, - file_index='', - file_index2=index_path, - ) - return os.path.join(now_dir, "audio-outputs", "converted_tts.wav"), os.path.join(now_dir, "audio-outputs", "real_tts.wav") - elif tts_method == "Bark-tts": - try: - - script = tts_text.replace("\n", " ").strip() - sentences = sent_tokenize(script) - print(sentences) - silence = np.zeros(int(0.25 * SAMPLE_RATE)) - pieces = [] - nombre_archivo = os.path.join(now_dir, "audio-outputs", "bark_out.wav") - for sentence in sentences: - audio_array , _ = __bark__(sentence, tts_voice.split("-")[0]) - pieces += [audio_array, silence.copy()] - - sf.write( - file= nombre_archivo, - samplerate=SAMPLE_RATE, - data=np.concatenate(pieces) - ) - vc.get_vc(model_path) - info_, (sample_, audio_output_) = vc.vc_single_dont_save( - sid=0, - input_audio_path0=os.path.join(now_dir, "audio-outputs", "bark_out.wav"), #f"audio2/{filename}", - input_audio_path1=os.path.join(now_dir, "audio-outputs", "bark_out.wav"), #f"audio2/{filename}", - f0_up_key=transpose, # transpose for m to f and reverse 0 12 - f0_file=None, - f0_method=f0_method, - file_index= '', # dir pwd? - file_index2= index_path, - # file_big_npy1, - index_rate= index_rate, - filter_radius= int(3), - resample_sr= int(0), - rms_mix_rate= float(0.25), - protect= float(0.33), - crepe_hop_length= crepe_hop_length, - f0_autotune=f0_autotune, - f0_min=50, - note_min=50, - f0_max=1100, - note_max=1100 - ) - wavfile.write(os.path.join(now_dir, "audio-outputs", "converted_bark.wav"), rate=sample_, data=audio_output_) - return os.path.join(now_dir, "audio-outputs", "converted_bark.wav"), nombre_archivo - - except Exception as e: - print(f"{e}") - return None, None - - - - - - -def GradioSetup(UTheme=gr.themes.Soft()): - - default_weight = names[0] if names else '' - - with gr.Blocks(theme='JohnSmith9982/small_and_pretty', title="Applio") as app: - gr.Markdown("🍏 Applio (Mangio-RVC-Fork HF)") - gr.Markdown("More spaces: [Aesthetic_RVC_Inference_HF](https://huggingface.co/spaces/r3gm/Aesthetic_RVC_Inference_HF), [AICoverGen](https://huggingface.co/spaces/r3gm/AICoverGen), [Ultimate-Vocal-Remover-WebUI](https://huggingface.co/spaces/r3gm/Ultimate-Vocal-Remover-WebUI), [Advanced-RVC-Inference](https://huggingface.co/spaces/r3gm/Advanced-RVC-Inference)") - gr.HTML("

            The current space only uses CPU, so it's only for inference. If you have issues with the queue, I recommend duplicating the space.

            ") - gr.Markdown( - "[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm-dark.svg)](https://huggingface.co/spaces/r3gm/RVC_HF?duplicate=true)\n\n" - ) - with gr.Tabs(): - with gr.TabItem(i18n("Model Inference")): - with gr.Row(): - sid0 = gr.Dropdown(label=i18n("Inferencing voice:"), choices=sorted(names), value=default_weight) - refresh_button = gr.Button(i18n("Refresh"), variant="primary") - clean_button = gr.Button(i18n("Unload voice to save GPU memory"), variant="primary") - clean_button.click(fn=lambda: ({"value": "", "__type__": "update"}), inputs=[], outputs=[sid0]) - - - with gr.TabItem(i18n("Single")): - with gr.Row(): - spk_item = gr.Slider( - minimum=0, - maximum=2333, - step=1, - label=i18n("Select Speaker/Singer ID:"), - value=0, - visible=False, - interactive=True, - ) - - - with gr.Group(): - with gr.Row(): - with gr.Column(): # First column for audio-related inputs - dropbox = gr.File(label=i18n("Drag your audio here:")) - record_button=gr.Audio(source="microphone", label=i18n("Or record an audio:"), type="filepath") - input_audio0 = gr.Textbox( - label=i18n("Manual path to the audio file to be processed"), - value=os.path.join(now_dir, "audios", "someguy.mp3"), - visible=False - ) - input_audio1 = gr.Dropdown( - label=i18n("Auto detect audio path and select from the dropdown:"), - choices=sorted(audio_paths), - value='', - interactive=True, - ) - - input_audio1.select(fn=lambda:'',inputs=[],outputs=[input_audio0]) - input_audio0.input(fn=lambda:'',inputs=[],outputs=[input_audio1]) - - dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio0]) - dropbox.upload(fn=easy_infer.change_choices2, inputs=[], outputs=[input_audio1]) - record_button.change(fn=save_to_wav, inputs=[record_button], outputs=[input_audio0]) - record_button.change(fn=easy_infer.change_choices2, inputs=[], outputs=[input_audio1]) - - best_match_index_path1 = match_index(sid0.value) # Get initial index from default sid0 (first voice model in list) - - with gr.Column(): # Second column for pitch shift and other options - file_index2 = gr.Dropdown( - label=i18n("Auto-detect index path and select from the dropdown:"), - choices=get_indexes(), - value=best_match_index_path1, - interactive=True, - allow_custom_value=True, - ) - index_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("Search feature ratio:"), - value=0.75, - interactive=True, - ) - refresh_button.click( - fn=change_choices, inputs=[], outputs=[sid0, file_index2, input_audio1] - ) - with gr.Column(): - vc_transform0 = gr.Number( - label=i18n("Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):"), value=0 - ) - - # Create a checkbox for advanced settings - advanced_settings_checkbox = gr.Checkbox( - value=False, - label=i18n("Advanced Settings"), - interactive=True, - ) - - # Advanced settings container - with gr.Column(visible=False) as advanced_settings: # Initially hidden - with gr.Row(label = i18n("Advanced Settings"), open = False): - with gr.Column(): - f0method0 = gr.Radio( - label=i18n( - "Select the pitch extraction algorithm:" - ), - choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny", "rmvpe", "rmvpe+"], - value="rmvpe+", - interactive=True, - ) - f0_autotune = gr.Checkbox( - label="Enable autotune", - interactive=True - ) - crepe_hop_length = gr.Slider( - minimum=1, - maximum=512, - step=1, - label=i18n("Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop 
length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate."), - value=120, - interactive=True, - visible=False, - ) - filter_radius0 = gr.Slider( - minimum=0, - maximum=7, - label=i18n("If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness."), - value=3, - step=1, - interactive=True, - ) - - minpitch_slider = gr.Slider( - label = i18n("Min pitch:"), - info = i18n("Specify minimal pitch for inference [HZ]"), - step = 0.1, - minimum = 1, - scale = 0, - value = 50, - maximum = 16000, - interactive = True, - visible = (not rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - ) - minpitch_txtbox = gr.Textbox( - label = i18n("Min pitch:"), - info = i18n("Specify minimal pitch for inference [NOTE][OCTAVE]"), - placeholder = "C5", - visible = (rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - interactive = True, - ) - - maxpitch_slider = gr.Slider( - label = i18n("Max pitch:"), - info = i18n("Specify max pitch for inference [HZ]"), - step = 0.1, - minimum = 1, - scale = 0, - value = 1100, - maximum = 16000, - interactive = True, - visible = (not rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - ) - maxpitch_txtbox = gr.Textbox( - label = i18n("Max pitch:"), - info = i18n("Specify max pitch for inference [NOTE][OCTAVE]"), - placeholder = "C6", - visible = (rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - interactive = True, - ) - - with gr.Column(): - file_index1 = gr.Textbox( - label=i18n("Feature search database file path:"), - value="", - interactive=True, - ) - - with gr.Accordion(label = i18n("Custom f0 [Root pitch] File"), open = False): - f0_file = gr.File(label=i18n("F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:")) - - f0method0.change( - fn=lambda radio: ( - { - "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'], - "__type__": "update" - } - ), - inputs=[f0method0], - outputs=[crepe_hop_length] - ) - - f0method0.change( - fn=switch_pitch_controls, - inputs=[f0method0], - outputs=[minpitch_slider, minpitch_txtbox, - maxpitch_slider, maxpitch_txtbox] - ) - - with gr.Column(): - resample_sr0 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:"), - value=0, - step=1, - interactive=True, - ) - rms_mix_rate0 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:"), - value=0.25, - interactive=True, - ) - protect0 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n( - "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. 
Decrease the value to increase protection, but it may reduce indexing accuracy:" - ), - value=0.33, - step=0.01, - interactive=True, - ) - formanting = gr.Checkbox( - value=bool(DoFormant), - label=i18n("Formant shift inference audio"), - info=i18n("Used for male to female and vice-versa conversions"), - interactive=True, - visible=True, - ) - - formant_preset = gr.Dropdown( - value='', - choices=get_fshift_presets(), - label=i18n("Browse presets for formanting"), - info=i18n("Presets are located in formantshiftcfg/ folder"), - visible=bool(DoFormant), - ) - - formant_refresh_button = gr.Button( - value='\U0001f504', - visible=bool(DoFormant), - variant='primary', - ) - - qfrency = gr.Slider( - value=Quefrency, - info=i18n("Default value is 1.0"), - label=i18n("Quefrency for formant shifting"), - minimum=0.0, - maximum=16.0, - step=0.1, - visible=bool(DoFormant), - interactive=True, - ) - - tmbre = gr.Slider( - value=Timbre, - info=i18n("Default value is 1.0"), - label=i18n("Timbre for formant shifting"), - minimum=0.0, - maximum=16.0, - step=0.1, - visible=bool(DoFormant), - interactive=True, - ) - frmntbut = gr.Button( - "Apply", variant="primary", visible=bool(DoFormant) - ) - - formant_preset.change( - fn=preset_apply, - inputs=[formant_preset, qfrency, tmbre], - outputs=[qfrency, tmbre], - ) - formanting.change( - fn=formant_enabled, - inputs=[ - formanting, - qfrency, - tmbre, - frmntbut, - formant_preset, - formant_refresh_button, - ], - outputs=[ - formanting, - qfrency, - tmbre, - frmntbut, - formant_preset, - formant_refresh_button, - ], - ) - frmntbut.click( - fn=formant_apply, - inputs=[qfrency, tmbre], - outputs=[qfrency, tmbre], - ) - formant_refresh_button.click( - fn=update_fshift_presets, - inputs=[formant_preset, qfrency, tmbre], - outputs=[formant_preset, qfrency, tmbre], - ) - - # Function to toggle advanced settings - def toggle_advanced_settings(checkbox): - return {"visible": checkbox, "__type__": "update"} - - # Attach the change event - advanced_settings_checkbox.change( - fn=toggle_advanced_settings, - inputs=[advanced_settings_checkbox], - outputs=[advanced_settings] - ) - - - but0 = gr.Button(i18n("Convert"), variant="primary").style(full_width=True) - - with gr.Row(): # Defines output info + output audio download after conversion - vc_output1 = gr.Textbox(label=i18n("Output information:")) - vc_output2 = gr.Audio(label=i18n("Export audio (click on the three dots in the lower right corner to download)")) - - with gr.Group(): # I think this defines the big convert button - with gr.Row(): - but0.click( - vc.vc_single, - [ - spk_item, - input_audio0, - input_audio1, - vc_transform0, - f0_file, - f0method0, - file_index1, - file_index2, - index_rate1, - filter_radius0, - resample_sr0, - rms_mix_rate0, - protect0, - crepe_hop_length, - minpitch_slider, minpitch_txtbox, - maxpitch_slider, maxpitch_txtbox, - f0_autotune - ], - [vc_output1, vc_output2], - ) - - - with gr.TabItem(i18n("Batch")): # Dont Change - with gr.Group(): # Markdown explanation of batch inference - gr.Markdown( - value=i18n("Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. 
The converted audio will be output in the specified folder (default: 'opt').") - ) - with gr.Row(): - with gr.Column(): - vc_transform1 = gr.Number( - label=i18n("Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):"), value=0 - ) - opt_input = gr.Textbox(label=i18n("Specify output folder:"), value="opt") - with gr.Column(): - file_index4 = gr.Dropdown( - label=i18n("Auto-detect index path and select from the dropdown:"), - choices=get_indexes(), - value=best_match_index_path1, - interactive=True, - ) - sid0.select(fn=match_index, inputs=[sid0], outputs=[file_index2, file_index4]) - - refresh_button.click( - fn=lambda: change_choices()[1], - inputs=[], - outputs=file_index4, - ) - index_rate2 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("Search feature ratio:"), - value=0.75, - interactive=True, - ) - with gr.Row(): - dir_input = gr.Textbox( - label=i18n("Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):"), - value=os.path.join(now_dir, "audios"), - ) - inputs = gr.File( - file_count="multiple", label=i18n("You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.") - ) - - with gr.Row(): - with gr.Column(): - # Create a checkbox for advanced batch settings - advanced_settings_batch_checkbox = gr.Checkbox( - value=False, - label=i18n("Advanced Settings"), - interactive=True, - ) - - # Advanced batch settings container - with gr.Row(visible=False) as advanced_settings_batch: # Initially hidden - with gr.Row(label = i18n("Advanced Settings"), open = False): - with gr.Column(): - file_index3 = gr.Textbox( - label=i18n("Feature search database file path:"), - value="", - interactive=True, - ) - - f0method1 = gr.Radio( - label=i18n( - "Select the pitch extraction algorithm:" - ), - choices=["pm", "harvest", "crepe", "rmvpe"], - value="rmvpe", - interactive=True, - ) - f0_autotune = gr.Checkbox( - label="Enable autotune", - interactive=True - ) - filter_radius1 = gr.Slider( - minimum=0, - maximum=7, - label=i18n("If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness."), - value=3, - step=1, - interactive=True, - ) - - with gr.Row(): - format1 = gr.Radio( - label=i18n("Export file format"), - choices=["wav", "flac", "mp3", "m4a"], - value="wav", - interactive=True, - ) - - - with gr.Column(): - resample_sr1 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:"), - value=0, - step=1, - interactive=True, - ) - rms_mix_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:"), - value=1, - interactive=True, - ) - protect1 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n( - "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. 
Decrease the value to increase protection, but it may reduce indexing accuracy:" - ), - value=0.33, - step=0.01, - interactive=True, - ) - vc_output3 = gr.Textbox(label=i18n("Output information:")) - but1 = gr.Button(i18n("Convert"), variant="primary") - but1.click( - vc.vc_multi, - [ - spk_item, - dir_input, - opt_input, - inputs, - vc_transform1, - f0method1, - file_index3, - file_index4, - index_rate2, - filter_radius1, - resample_sr1, - rms_mix_rate1, - protect1, - format1, - crepe_hop_length, - minpitch_slider if (not rvc_globals.NotesOrHertz) else minpitch_txtbox, - maxpitch_slider if (not rvc_globals.NotesOrHertz) else maxpitch_txtbox, - f0_autotune - ], - [vc_output3], - ) - - sid0.change( - fn=vc.get_vc, - inputs=[sid0, protect0, protect1], - outputs=[spk_item, protect0, protect1], - ) - if not sid0.value == '': - spk_item, protect0, protect1 = vc.get_vc(sid0.value, protect0, protect1) - - #spk_item, protect0, protect1 = vc.get_vc(sid0.value, protect0, protect1) - - # Function to toggle advanced settings - def toggle_advanced_settings_batch(checkbox): - return {"visible": checkbox, "__type__": "update"} - - # Attach the change event - advanced_settings_batch_checkbox.change( - fn=toggle_advanced_settings_batch, - inputs=[advanced_settings_batch_checkbox], - outputs=[advanced_settings_batch] - ) - - - with gr.TabItem(i18n("Train")): - - - with gr.Accordion(label=i18n("Step 1: Processing data")): - with gr.Row(): - exp_dir1 = gr.Textbox(label=i18n("Enter the model name:"), value=i18n("Model_Name")) - sr2 = gr.Radio( - label=i18n("Target sample rate:"), - choices=["40k", "48k", "32k"], - value="40k", - interactive=True, - ) - if_f0_3 = gr.Checkbox( - label=i18n("Whether the model has pitch guidance."), - value=True, - interactive=True, - ) - version19 = gr.Radio( - label=i18n("Version:"), - choices=["v1", "v2"], - value="v2", - interactive=True, - visible=True, - ) - np7 = gr.Slider( - minimum=0, - maximum=config.n_cpu, - step=1, - label=i18n("Number of CPU processes:"), - value=int(np.ceil(config.n_cpu / 1.5)), - interactive=True, - ) - with gr.Group(): - with gr.Accordion(label=i18n("Step 2: Skipping pitch extraction")): - - with gr.Row(): - # trainset_dir4 = gr.Textbox( - # label=i18n("Enter the path of the training folder:"), value=os.path.join(now_dir, datasets_root) - # ) - with gr.Column(): - trainset_dir4 = gr.Dropdown(choices=sorted(datasets), label=i18n("Select your dataset:"), value=get_dataset()) - btn_update_dataset_list = gr.Button(i18n("Update list"), variant="primary") - spk_id5 = gr.Slider( - minimum=0, - maximum=4, - step=1, - label=i18n("Specify the model ID:"), - value=0, - interactive=True, - ) - btn_update_dataset_list.click( - easy_infer.update_dataset_list, [spk_id5], trainset_dir4 - ) - but1 = gr.Button(i18n("Process data"), variant="primary") - info1 = gr.Textbox(label=i18n("Output information:"), value="") - but1.click( - preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1] - ) - with gr.Group(): - with gr.Accordion(label=i18n("Step 3: Extracting features")): - with gr.Row(): - with gr.Column(): - gpus6 = gr.Textbox( - label=i18n("Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:"), - value=gpus, - interactive=True, - ) - gpu_info9 = gr.Textbox( - label=i18n("GPU Information:"), value=gpu_info, visible=F0GPUVisible - ) - with gr.Column(): - f0method8 = gr.Radio( - label=i18n( - "Select the pitch extraction algorithm:" - ), - choices=["pm", "harvest", "dio", "crepe", "mangio-crepe", "rmvpe", "rmvpe_gpu"], - # [ 
MANGIO ]: Fork feature: Crepe on f0 extraction for training. - value="rmvpe", - interactive=True, - ) - gpus_rmvpe = gr.Textbox( - label=i18n( - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程" - ), - value="%s-%s" % (gpus, gpus), - interactive=True, - visible=F0GPUVisible, - ) - - extraction_crepe_hop_length = gr.Slider( - minimum=1, - maximum=512, - step=1, - label=i18n("Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate."), - value=64, - interactive=True, - visible=False, - ) - - f0method8.change( - fn=lambda radio: ( - { - "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'], - "__type__": "update" - } - ), - inputs=[f0method8], - outputs=[extraction_crepe_hop_length] - ) - f0method8.change( - fn=change_f0_method, - inputs=[f0method8], - outputs=[gpus_rmvpe], - ) - but2 = gr.Button(i18n("Feature extraction"), variant="primary") - info2 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=8, interactive=False) - but2.click( - extract_f0_feature, - [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length, gpus_rmvpe,], - [info2], - ) - with gr.Group(): - with gr.Row(): - with gr.Accordion(label=i18n("Step 4: Model training started")): - with gr.Row(): - save_epoch10 = gr.Slider( - minimum=1, - maximum=100, - step=1, - label=i18n("Save frequency:"), - value=10, - interactive=True, - visible=True, - ) - total_epoch11 = gr.Slider( - minimum=1, - maximum=10000, - step=2, - label=i18n("Training epochs:"), - value=750, - interactive=True, - ) - batch_size12 = gr.Slider( - minimum=1, - maximum=50, - step=1, - label=i18n("Batch size per GPU:"), - value=default_batch_size, - #value=20, - interactive=True, - ) - - with gr.Row(): - if_save_latest13 = gr.Checkbox( - label=i18n("Whether to save only the latest .ckpt file to save hard drive space"), - value=True, - interactive=True, - ) - if_cache_gpu17 = gr.Checkbox( - label=i18n("Cache all training sets to GPU memory. 
Caching small datasets (less than 10 minutes) can speed up training"), - value=False, - interactive=True, - ) - if_save_every_weights18 = gr.Checkbox( - label=i18n("Save a small final model to the 'weights' folder at each save point"), - value=True, - interactive=True, - ) - - with gr.Row(): - pretrained_G14 = gr.Textbox( - lines=4, - label=i18n("Load pre-trained base model G path:"), - value="assets/pretrained_v2/f0G40k.pth", - interactive=True, - ) - pretrained_D15 = gr.Textbox( - lines=4, - label=i18n("Load pre-trained base model D path:"), - value="assets/pretrained_v2/f0D40k.pth", - interactive=True, - ) - gpus16 = gr.Textbox( - label=i18n("Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:"), - value=gpus, - interactive=True, - ) - sr2.change( - change_sr2, - [sr2, if_f0_3, version19], - [pretrained_G14, pretrained_D15], - ) - version19.change( - change_version19, - [sr2, if_f0_3, version19], - [pretrained_G14, pretrained_D15, sr2], - ) - if_f0_3.change( - fn=change_f0, - inputs=[if_f0_3, sr2, version19], - outputs=[f0method8, pretrained_G14, pretrained_D15], - ) - if_f0_3.change(fn=lambda radio: ( - { - "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'], - "__type__": "update" - } - ), inputs=[f0method8], outputs=[extraction_crepe_hop_length]) - - butstop = gr.Button(i18n("Stop training"), - variant='primary', - visible=False, - ) - but3 = gr.Button(i18n("Train model"), variant="primary", visible=True) - but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)], outputs=[but3, butstop]) - butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)], outputs=[but3, butstop]) - - - with gr.Column(): - info3 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=4) - save_action = gr.Dropdown(label=i18n("Save type"), choices=[i18n("Save all"),i18n("Save D and G"),i18n("Save voice")], value=i18n("Choose the method"), interactive=True) - - but7 = gr.Button(i18n("Save model"), variant="primary") - but4 = gr.Button(i18n("Train feature index"), variant="primary") - - - - if_save_every_weights18.change( - fn=lambda if_save_every_weights: ( - { - "visible": if_save_every_weights, - "__type__": "update" - } - ), - inputs=[if_save_every_weights18], - outputs=[save_epoch10] - ) - - but3.click( - click_train, - [ - exp_dir1, - sr2, - if_f0_3, - spk_id5, - save_epoch10, - total_epoch11, - batch_size12, - if_save_latest13, - pretrained_G14, - pretrained_D15, - gpus16, - if_cache_gpu17, - if_save_every_weights18, - version19, - ], - [info3, butstop, but3], - ) - - but4.click(train_index, [exp_dir1, version19], info3) - but7.click(easy_infer.save_model, [exp_dir1, save_action], info3) - with gr.Group(): - with gr.Row(): - with gr.Accordion(label=i18n("Step 5: Export lowest points on a graph of the model")): - - lowestval_weight_dir = gr.Textbox(visible=False) - ds = gr.Textbox(visible=False) - weights_dir1 = gr.Textbox(visible=False, value=weights_dir) - - - with gr.Row(): - amntlastmdls = gr.Slider( - minimum=1, - maximum=25, - label=i18n('How many lowest points to save:'), - value=3, - step=1, - interactive=True, - ) - lpexport = gr.Button( - value=i18n('Export lowest points of a model'), - variant='primary', - ) - lw_mdls = gr.File( - file_count="multiple", - label=i18n("Output models:"), - interactive=False, - ) ##### - - with gr.Row(): - infolpex = gr.Textbox(label=i18n("Output information:"), value="", max_lines=10) - mdlbl = gr.Dataframe(label=i18n('Stats of selected models:'), datatype='number', type='pandas') - - 
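# --- Editor's note: the sketch below is illustrative and not part of the original file. ---
# The "Train feature index" button above calls train_index(), which saves
# total_fea.npy plus a trained_/added_ .index file into logs/<experiment>.
# At inference time that .index file is searched for the nearest feature
# frames, which are then blended into the extracted features according to
# the index rate. A minimal standalone query, assuming an example v2 index
# path and an arbitrary k of 8 (neither taken from this repo):
#
#   import faiss
#   import numpy as np
#
#   index = faiss.read_index("logs/mi-test/added_IVF256_Flat_nprobe_1_mi-test_v2.index")
#   query = np.random.rand(1, 768).astype(np.float32)  # one v2 feature frame (768-dim)
#   distances, ids = index.search(query, 8)            # 8 nearest training frames
#   big_npy = np.load("logs/mi-test/total_fea.npy")    # feature matrix saved by train_index()
#   neighbors = big_npy[ids[0]]                        # candidate vectors to blend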
lpexport.click( - lambda model_name: os.path.join("logs", model_name, "lowestvals"), - inputs=[exp_dir1], - outputs=[lowestval_weight_dir] - ) - - lpexport.click(fn=tensorlowest.main, inputs=[exp_dir1, save_epoch10, amntlastmdls], outputs=[ds]) - - ds.change( - fn=tensorlowest.selectweights, - inputs=[exp_dir1, ds, weights_dir1, lowestval_weight_dir], - outputs=[infolpex, lw_mdls, mdlbl], - ) - with gr.TabItem(i18n("UVR5")): # UVR section - with gr.Group(): - with gr.Row(): - with gr.Column(): - model_select = gr.Radio( - label=i18n("Model Architecture:"), - choices=["VR", "MDX"], - value="VR", - interactive=True, - ) - dir_wav_input = gr.Textbox( - label=i18n("Enter the path of the audio folder to be processed:"), - value=os.path.join(now_dir, "audios") - ) - wav_inputs = gr.File( - file_count="multiple", label=i18n("You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.") - ) - - with gr.Column(): - model_choose = gr.Dropdown(label=i18n("Model:"), choices=uvr5_names) - agg = gr.Slider( - minimum=0, - maximum=20, - step=1, - label="Vocal Extraction Aggressive", - value=10, - interactive=True, - visible=False, - ) - opt_vocal_root = gr.Textbox( - label=i18n("Specify the output folder for vocals:"), value="opt" - ) - opt_ins_root = gr.Textbox( - label=i18n("Specify the output folder for accompaniment:"), value="opt" - ) - format0 = gr.Radio( - label=i18n("Export file format:"), - choices=["wav", "flac", "mp3", "m4a"], - value="flac", - interactive=True, - ) - model_select.change( - fn=update_model_choices, - inputs=model_select, - outputs=model_choose, - ) - but2 = gr.Button(i18n("Convert"), variant="primary") - vc_output4 = gr.Textbox(label=i18n("Output information:")) - #wav_inputs.upload(fn=save_to_wav2_edited, inputs=[wav_inputs], outputs=[]) - but2.click( - uvr, - [ - model_choose, - dir_wav_input, - opt_vocal_root, - wav_inputs, - opt_ins_root, - agg, - format0, - model_select - ], - [vc_output4], - ) - with gr.TabItem(i18n("TTS")): - with gr.Group(): - with gr.Column(): - text_test = gr.Textbox(label=i18n("Text:"), placeholder=i18n("Enter the text you want to convert to voice..."), lines=6) - - with gr.Group(): - with gr.Row(): - with gr.Column(): - tts_methods_voice = ["Edge-tts", "Bark-tts"] - ttsmethod_test = gr.Dropdown(tts_methods_voice, value='Edge-tts', label = i18n('TTS Method:'), visible=True) - tts_test = gr.Dropdown(set_edge_voice, label = i18n('TTS Model:'), visible=True) - ttsmethod_test.change( - fn=update_tts_methods_voice, - inputs=ttsmethod_test, - outputs=tts_test, - ) - - with gr.Column(): - model_voice_path07 = gr.Dropdown(label=i18n('RVC Model:'), choices=sorted(names), value=default_weight) - best_match_index_path1 = match_index(model_voice_path07.value) - - file_index2_07 = gr.Dropdown( - label=i18n('Select the .index file:'), - choices=get_indexes(), - value=best_match_index_path1, - interactive=True, - allow_custom_value=True, - ) - #transpose_test = gr.Number(label = i18n('Transpose (integer, number Fof semitones, raise by an octave: 12, lower by an octave: -12):'), value=0, visible=True, interactive= True) - - - - - with gr.Row(): - refresh_button_ = gr.Button(i18n("Refresh"), variant="primary") - refresh_button_.click(fn=change_choices2, inputs=[], outputs=[model_voice_path07, file_index2_07]) - with gr.Row(): - original_ttsvoice = gr.Audio(label=i18n('Audio TTS:')) - ttsvoice = gr.Audio(label=i18n('Audio RVC:')) - - with gr.Row(): - button_test = gr.Button(i18n("Convert"), variant="primary") 
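# --- Editor's note: the sketch below is illustrative and not part of the original file. ---
# The "Convert" button above is wired to make_test() just below. Its
# Edge-tts branch reduces to a single async call; standalone it looks like
# this (the voice name and output file name are example values):
#
#   import asyncio
#   import edge_tts
#
#   async def tts_to_wav(text: str, voice: str, path: str) -> None:
#       await edge_tts.Communicate(text, voice).save(path)
#
#   asyncio.run(tts_to_wav("Hello there", "en-US-AriaNeural", "converted_tts.wav"))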
- - - button_test.click(make_test, inputs=[ - text_test, - tts_test, - model_voice_path07, - file_index2_07, - #transpose_test, - vc_transform0, - f0method8, - index_rate1, - crepe_hop_length, - f0_autotune, - ttsmethod_test - ], outputs=[ttsvoice, original_ttsvoice]) - - with gr.TabItem(i18n("Resources")): - gr.Markdown(f"Limit download size is {os.getenv('MAX_DOWNLOAD_SIZE')} MB, duplicate the space for modify the limit") - easy_infer.download_model() - easy_infer.download_backup() - easy_infer.download_dataset(trainset_dir4) - easy_infer.download_audio() - easy_infer.youtube_separator() - with gr.TabItem(i18n("Extra")): - gr.Markdown( - value=i18n("This section contains some extra utilities that often may be in experimental phases") - ) - with gr.TabItem(i18n("Merge Audios")): - with gr.Group(): - gr.Markdown( - value="## " + i18n("Merge your generated audios with the instrumental") - ) - gr.Markdown(value=".",visible=True) - gr.Markdown(value=".",visible=True) - with gr.Row(): - with gr.Column(): - dropbox = gr.File(label=i18n("Drag your audio here:")) - gr.Markdown(value=i18n("### Instrumental settings:")) - input_audio1 = gr.Dropdown( - label=i18n("Choose your instrumental:"), - choices=sorted(audio_others_paths), - value='', - interactive=True, - ) - input_audio1_scale = gr.Slider( - minimum=0, - maximum=10, - label=i18n("Volume of the instrumental audio:"), - value=1.00, - interactive=True, - ) - gr.Markdown(value=i18n("### Audio settings:")) - input_audio3 = gr.Dropdown( - label=i18n("Select the generated audio"), - choices=sorted(audio_paths), - value='', - interactive=True, - ) - with gr.Row(): - input_audio3_scale = gr.Slider( - minimum=0, - maximum=10, - label=i18n("Volume of the generated audio:"), - value=1.00, - interactive=True, - ) - - gr.Markdown(value=i18n("### Add the effects:")) - reverb_ = gr.Checkbox( - label=i18n("Reverb"), - value=False, - interactive=True, - ) - compressor_ = gr.Checkbox( - label=i18n("Compressor"), - value=False, - interactive=True, - ) - noise_gate_ = gr.Checkbox( - label=i18n("Noise Gate"), - value=False, - interactive=True, - ) - - butnone = gr.Button(i18n("Merge"), variant="primary").style(full_width=True) - - vc_output1 = gr.Textbox(label=i18n("Output information:")) - vc_output2 = gr.Audio(label=i18n("Export audio (click on the three dots in the lower right corner to download)"), type='filepath') - - dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio1]) - dropbox.upload(fn=easy_infer.change_choices2, inputs=[], outputs=[input_audio1]) - - refresh_button.click( - fn=lambda: change_choices3(), - inputs=[], - outputs=[input_audio1, input_audio3], - ) - - butnone.click( - fn=audio_combined, - inputs=[input_audio1, input_audio3,input_audio1_scale,input_audio3_scale,reverb_,compressor_,noise_gate_], - outputs=[vc_output1, vc_output2] - ) - - - with gr.TabItem(i18n("Processing")): - with gr.Group(): - - with gr.Accordion(label=i18n("Model fusion, can be used to test timbre fusion")): - with gr.Row(): - with gr.Column(): - name_to_save0 = gr.Textbox( - label=i18n("Name:"), - value="", - max_lines=1, - interactive=True, - placeholder=i18n("Name for saving") - ) - alpha_a = gr.Slider( - minimum=0, - maximum=1, - label=i18n("Weight for Model A:"), - value=0.5, - interactive=True, - ) - if_f0_ = gr.Checkbox( - label=i18n("Whether the model has pitch guidance."), - value=True, - interactive=True, - ) - version_2 = gr.Radio( - label=i18n("Model architecture version:"), - choices=["v1", "v2"], - value="v2", - interactive=True, - ) - 
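# --- Editor's note: the sketch below is illustrative and not part of the original file. ---
# These widgets feed the "Fusion" button further down, which calls merge()
# to blend two checkpoints. The core idea is a weighted average of the
# matching tensors; a rough sketch (blend_weights is a hypothetical helper,
# and the repo's merge() likely also handles metadata and shape checks):
#
#   import torch
#
#   def blend_weights(path_a, path_b, alpha):
#       a = torch.load(path_a, map_location="cpu")["weight"]
#       b = torch.load(path_b, map_location="cpu")["weight"]
#       return {k: alpha * a[k] + (1 - alpha) * b[k] for k in a if k in b}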
sr_ = gr.Radio( - label=i18n("Target sample rate:"), - choices=["40k", "48k"], - value="40k", - interactive=True, - ) - - - with gr.Column(): - ckpt_a = gr.Textbox(label=i18n("Path to Model A:"), value="", interactive=True, placeholder=i18n("Path to model")) - - ckpt_b = gr.Textbox(label=i18n("Path to Model B:"), value="", interactive=True, placeholder=i18n("Path to model")) - - info__ = gr.Textbox( - label=i18n("Model information to be placed:"), value="", max_lines=8, interactive=True, placeholder=i18n("Model information to be placed") - ) - info4 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=8) - - - but6 = gr.Button(i18n("Fusion"), variant="primary") - - but6.click( - merge, - [ - ckpt_a, - ckpt_b, - alpha_a, - sr_, - if_f0_, - info__, - name_to_save0, - version_2, - ], - info4, - ) # def merge(path1,path2,alpha1,sr,f0,info): - with gr.Group(): - with gr.Accordion(label=i18n("Modify model information")): - with gr.Row(): ###### - with gr.Column(): - ckpt_path0 = gr.Textbox( - label=i18n("Path to Model:"), value="", interactive=True, placeholder=i18n("Path to model") - ) - info_ = gr.Textbox( - label=i18n("Model information to be modified:"), value="", max_lines=8, interactive=True, placeholder=i18n("Model information to be placed") - ) - - with gr.Column(): - name_to_save1 = gr.Textbox( - label=i18n("Save file name:"), - placeholder=i18n("Name for saving"), - value="", - max_lines=8, - interactive=True, - - ) - - info5 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=8) - but7 = gr.Button(i18n("Modify"), variant="primary") - but7.click(change_info, [ckpt_path0, info_, name_to_save1], info5) - with gr.Group(): - with gr.Accordion(label=i18n("View model information")): - with gr.Row(): - with gr.Column(): - ckpt_path1 = gr.Textbox( - label=i18n("Path to Model:"), value="", interactive=True, placeholder=i18n("Path to model") - ) - - info6 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=8) - but8 = gr.Button(i18n("View"), variant="primary") - but8.click(show_info, [ckpt_path1], info6) - with gr.Group(): - with gr.Accordion(label=i18n("Model extraction")): - with gr.Row(): - with gr.Column(): - save_name = gr.Textbox( - label=i18n("Name:"), value="", interactive=True, placeholder=i18n("Name for saving") - ) - if_f0__ = gr.Checkbox( - label=i18n("Whether the model has pitch guidance."), - value=True, - interactive=True, - ) - version_1 = gr.Radio( - label=i18n("Model architecture version:"), - choices=["v1", "v2"], - value="v2", - interactive=True, - ) - sr__ = gr.Radio( - label=i18n("Target sample rate:"), - choices=["32k", "40k", "48k"], - value="40k", - interactive=True, - ) - - with gr.Column(): - ckpt_path2 = gr.Textbox( - - label=i18n("Path to Model:"), - placeholder=i18n("Path to model"), - interactive=True, - ) - info___ = gr.Textbox( - label=i18n("Model information to be placed:"), value="", max_lines=8, interactive=True, placeholder=i18n("Model information to be placed") - ) - info7 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=8) - - with gr.Row(): - - but9 = gr.Button(i18n("Extract"), variant="primary") - ckpt_path2.change( - change_info_, [ckpt_path2], [sr__, if_f0__, version_1] - ) - but9.click( - extract_small_model, - [ckpt_path2, save_name, sr__, if_f0__, info___, version_1], - info7, - ) - - - - - with gr.TabItem(i18n("Settings")): - with gr.Row(): - gr.Markdown(value= - i18n("Pitch settings") - ) - noteshertz = gr.Checkbox( - label = i18n("Whether to use note names instead of their hertz 
value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz"), - value = rvc_globals.NotesOrHertz, - interactive = True, - ) - - noteshertz.change(fn=lambda nhertz: rvc_globals.__setattr__('NotesOrHertz', nhertz), inputs=[noteshertz], outputs=[]) - - noteshertz.change( - fn=switch_pitch_controls, - inputs=[f0method0], - outputs=[ - minpitch_slider, minpitch_txtbox, - maxpitch_slider, maxpitch_txtbox,] - ) - return app - -def GradioRun(app): - share_gradio_link = config.iscolab or config.paperspace - concurrency_count = 511 - max_size = 1022 - - if ( - config.iscolab or config.paperspace - ): - app.queue(concurrency_count=concurrency_count, max_size=max_size).launch( - favicon_path="./images/icon.png", - ) - else: - app.queue(concurrency_count=concurrency_count, max_size=max_size).launch( - favicon_path=".\images\icon.png", - ) - -if __name__ == "__main__": - if os.name == 'nt': - print(i18n("Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n")) - app = GradioSetup(UTheme=config.grtheme) - GradioRun(app) \ No newline at end of file diff --git a/spaces/radames/Candle-T5-Generation-Wasm/index.html b/spaces/radames/Candle-T5-Generation-Wasm/index.html deleted file mode 100644 index e5b38dee453fe188bc6e00af496377e0e8af7587..0000000000000000000000000000000000000000 --- a/spaces/radames/Candle-T5-Generation-Wasm/index.html +++ /dev/null @@ -1,283 +0,0 @@ - - - - Candle T5 - - - - - - - - - - - - - - - - - - -
            - 🕯️ -
            -

            Candle T5 Transformer

            -

            Rust/WASM Demo

            -

- This demo showcases Text-To-Text Transfer Transformer (T5) models right in your browser, thanks to the - - Candle - - ML framework and Rust/WASM. You can choose from a range of available - models, including - - t5-small, - t5-base, - flan-t5-small, - several - - t5 quantized gguf models, and also a quantized - - CoEdIT model for text rewrite. -

            -
            - -
            - - -
            - -
            -

            Task Prefix:

            -
            -
            -
            - - - -
            -
            - - - - 0.00 - - - - 1.00 - - - - - 1.10 - - - -
            -
            -

            Generation:

            -
            -

            No output yet

            -
            -
            -
            - - diff --git a/spaces/radames/Detecting-Photoshopped-Faces-FALdetector/utils/__init__.py b/spaces/radames/Detecting-Photoshopped-Faces-FALdetector/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/radames/NYTimes-homepage-rearranged/static/_app/pages/index.svelte-a06f0dfd.js b/spaces/radames/NYTimes-homepage-rearranged/static/_app/pages/index.svelte-a06f0dfd.js deleted file mode 100644 index fa00dcab1007c309e78680beb5b896b846f97bf6..0000000000000000000000000000000000000000 --- a/spaces/radames/NYTimes-homepage-rearranged/static/_app/pages/index.svelte-a06f0dfd.js +++ /dev/null @@ -1,43 +0,0 @@ -import{S as Zt,i as ea,s as ta,e as d,k as E,t as p,c as f,a as u,d as i,m as S,h as m,b as l,g as Q,J as e,j as ge,K as ce,L as Kt,M as Rt,N as aa,O as Ut,P as qt,Q as la,R as sa,q as Pe,o as Ce,T as ra,U as na,V as oa,w as ia,x as ca,y as ha,B as ua,n as da,p as fa,W as pa,X as Wt,Y as Gt}from"../chunks/vendor-528a52ee.js";function ma(n){let t,a,c;return{c(){t=d("a"),a=d("div"),this.h()},l(s){t=f(s,"A",{target:!0,href:!0});var h=u(t);a=f(h,"DIV",{class:!0}),u(a).forEach(i),h.forEach(i),this.h()},h(){l(a,"class","hover:opacity-60 bg-gray-200 h-full w-full max-w-[15rem] md:max-w-md object-cover object-top aspect-[4/3]"),l(t,"target","_blank"),l(t,"href",c=n[0].link)},m(s,h){Q(s,t,h),e(t,a)},p(s,h){h&1&&c!==(c=s[0].link)&&l(t,"href",c)},d(s){s&&i(t)}}}function _a(n){let t,a,c,s,h;return{c(){t=d("a"),a=d("img"),this.h()},l(r){t=f(r,"A",{target:!0,href:!0});var o=u(t);a=f(o,"IMG",{class:!0,src:!0,alt:!0,loading:!0}),o.forEach(i),this.h()},h(){l(a,"class","hover:opacity-60 m-0 w-full max-w-[15rem] md:max-w-md object-cover object-top aspect-[4/3]"),Kt(a.src,c=n[0].media_content[0].url)||l(a,"src",c),l(a,"alt",s=n[0].title),l(a,"loading","lazy"),l(t,"target","_blank"),l(t,"href",h=n[0].link)},m(r,o){Q(r,t,o),e(t,a)},p(r,o){o&1&&!Kt(a.src,c=r[0].media_content[0].url)&&l(a,"src",c),o&1&&s!==(s=r[0].title)&&l(a,"alt",s),o&1&&h!==(h=r[0].link)&&l(t,"href",h)},d(r){r&&i(t)}}}function ga(n){let t,a,c,s,h,r,o,_=n[0].sentiment.toFixed(4)+"",A,C,j,w,b,q=n[0].title+"",L,he,Y,W,K=n[0].author+"",X,B,ue,x,D=n[0].summary+"",Z;function pe(k,v){return k[0].media_content?_a:ma}let H=pe(n),T=H(n);return{c(){t=d("div"),a=d("div"),T.c(),c=E(),s=d("div"),h=d("div"),r=p(`Score: - `),o=d("span"),A=p(_),j=E(),w=d("a"),b=d("h2"),L=p(q),he=E(),Y=d("h5"),W=p("By "),X=p(K),ue=E(),x=d("p"),this.h()},l(k){t=f(k,"DIV",{class:!0});var v=u(t);a=f(v,"DIV",{});var G=u(a);T.l(G),G.forEach(i),c=S(v),s=f(v,"DIV",{class:!0});var ee=u(s);h=f(ee,"DIV",{class:!0});var de=u(h);r=m(de,`Score: - `),o=f(de,"SPAN",{class:!0});var te=u(o);A=m(te,_),te.forEach(i),de.forEach(i),j=S(ee),w=f(ee,"A",{target:!0,class:!0,href:!0});var R=u(w);b=f(R,"H2",{class:!0});var me=u(b);L=m(me,q),me.forEach(i),he=S(R),Y=f(R,"H5",{class:!0});var N=u(Y);W=m(N,"By "),X=m(N,K),N.forEach(i),ue=S(R),x=f(R,"P",{class:!0});var ve=u(x);ve.forEach(i),R.forEach(i),ee.forEach(i),v.forEach(i),this.h()},h(){l(o,"class",C="font-bold "+(n[0].sentiment>0?"text-emerald-600":"text-red-600")),l(h,"class","text-sm"),l(b,"class","m-0 font-serif leading-tight hover:opacity-50"),l(Y,"class",B="mt-1 leading-tight "+(n[0].author?"visibile":"invisible")),l(x,"class","prose max-w-prose leading-normal prose-gray"),l(w,"target","_blank"),l(w,"class","no-underline"),l(w,"href",Z=n[0].link),l(s,"class","col-span-2 sm:pl-4"),l(t,"class","group grid grid-cols-1 
sm:grid-cols-3")},m(k,v){Q(k,t,v),e(t,a),T.m(a,null),e(t,c),e(t,s),e(s,h),e(h,r),e(h,o),e(o,A),e(s,j),e(s,w),e(w,b),e(b,L),e(w,he),e(w,Y),e(Y,W),e(Y,X),e(w,ue),e(w,x),x.innerHTML=D},p(k,[v]){H===(H=pe(k))&&T?T.p(k,v):(T.d(1),T=H(k),T&&(T.c(),T.m(a,null))),v&1&&_!==(_=k[0].sentiment.toFixed(4)+"")&&ge(A,_),v&1&&C!==(C="font-bold "+(k[0].sentiment>0?"text-emerald-600":"text-red-600"))&&l(o,"class",C),v&1&&q!==(q=k[0].title+"")&&ge(L,q),v&1&&K!==(K=k[0].author+"")&&ge(X,K),v&1&&B!==(B="mt-1 leading-tight "+(k[0].author?"visibile":"invisible"))&&l(Y,"class",B),v&1&&D!==(D=k[0].summary+"")&&(x.innerHTML=D),v&1&&Z!==(Z=k[0].link)&&l(w,"href",Z)},i:ce,o:ce,d(k){k&&i(t),T.d()}}}function va(n,t,a){let{feedEntry:c}=t;return n.$$set=s=>{"feedEntry"in s&&a(0,c=s.feedEntry)},[c]}class ba extends Zt{constructor(t){super();ea(this,t,va,ga,ta,{feedEntry:0})}}function Jt(n,t,a){const c=n.slice();return c[9]=t[a],c[11]=a,c}function $t(n,t,a){const c=n.slice();return c[13]=t[a],c}function Qt(n,t){let a,c=t[13].label+"",s,h;return{key:n,first:null,c(){a=d("option"),s=p(c),this.h()},l(r){a=f(r,"OPTION",{});var o=u(a);s=m(o,c),o.forEach(i),this.h()},h(){a.__value=h=t[13].value,a.value=a.__value,this.first=a},m(r,o){Q(r,a,o),e(a,s)},p(r,o){t=r},d(r){r&&i(a)}}}function wa(n){let t,a;return{c(){t=d("p"),a=p("An error occurred!")},l(c){t=f(c,"P",{});var s=u(t);a=m(s,"An error occurred!"),s.forEach(i)},m(c,s){Q(c,t,s),e(t,a)},p:ce,i:ce,o:ce,d(c){c&&i(t)}}}function ka(n){let t,a,c=n[1],s=[];for(let r=0;rCe(s[r],1,1,()=>{s[r]=null});return{c(){t=d("ul");for(let r=0;ry[13].value;for(let y=0;yn[7].call(O)),l(ne,"class","max-w-prose leading-normal"),l(Ie,"class","py-4"),l(oe,"class",Be=(n[3]?"bg-emerald-600":"bg-red-600")+" hover:bg-zinc-300 text-white font-bold py-2 px-4 rounded"),l(t,"class","prose px-6 py-3 max-w-4xl mx-auto")},m(y,g){Q(y,t,g),e(t,a),e(a,c),e(t,s),e(t,h),e(h,r),e(r,o),e(h,_),e(h,C),e(t,w),e(t,b),e(b,q),e(b,L),e(L,he),e(b,Y),e(b,K),e(b,X),e(b,B),e(B,ue),e(b,x),e(b,D),e(D,Z),e(b,pe),e(b,H),e(H,T),e(t,k),e(t,v),e(v,G),e(G,ee),e(v,de),e(v,te),e(te,R),e(v,me),e(v,N),e(N,ve),e(N,ae),e(ae,xe),e(N,Oe),e(N,be),e(be,Ve),e(N,Fe),e(N,we),e(we,ze),e(N,Ke),e(v,Re),e(v,ke),e(ke,Ue),e(v,qe),e(v,M),e(M,We),e(M,le),e(le,ye),e(ye,Ge),e(M,Je),e(M,Ee),e(Ee,$e),e(M,Qe),e(M,Se),e(Se,Xe),e(M,Ze),e(M,se),e(se,Ae),e(Ae,et),e(v,tt),e(v,Ne),e(Ne,at),e(v,lt),e(v,U),e(U,st),e(U,re),e(re,Te),e(Te,rt),e(U,nt),e(U,fe),e(fe,ot),e(U,it),e(t,ct),e(t,ne),e(ne,ht),e(ne,O);for(let F=0;Ft,I.anchor=null,J=!0,mt||(vt=[qt(O,"change",n[7]),qt(oe,"click",n[6])],mt=!0)},p(y,[g]){n=y,(!J||g&4)&&A!==(A=(n[2]?n[2].toLocaleString():"")+"")&&ge(C,A),(!J||g&4&&j!==(j="mt-0 "+(n[2]?"visibile":"invisible")))&&l(h,"class",j),(!J||g&8)&&W!==(W=n[3]?"good and bad news":"bad and good news")&&ge(K,W),g&16&&(He=n[4],V=la(V,g,bt,1,n,He,gt,O,na,Qt,null,$t)),g&17&&Ut(O,n[0]),(!J||g&8)&&je!==(je=n[3]?"Sorted by positive scores":"Sorted by negative scores")&&ge(Le,je),(!J||g&8&&Be!==(Be=(n[3]?"bg-emerald-600":"bg-red-600")+" hover:bg-zinc-300 text-white font-bold py-2 px-4 rounded"))&&l(oe,"class",Be),I.ctx=n,g&1&&De!==(De=n[5](n[0]))&&Rt(De,I)||sa(I,n,g)},i(y){J||(Pe(I.block),J=!0)},o(y){for(let g=0;g<3;g+=1){const F=I.blocks[g];Ce(F)}J=!1},d(y){y&&i(t);for(let g=0;gw.json()))}catch{a(1,h=await fetch("static/test.json").then(b=>b.json()))}a(2,r=new Date(h.last_update)),a(1,h=h.entries.sort((w,b)=>b.sentiment-w.sentiment)),a(3,o=!0),console.log(r,h)}function A(){a(3,o=!o),a(1,h=h.slice().sort((j,w)=>o?w.sentiment-j.sentiment:j.sentiment-w.sentiment))}function 
C(){s=oa(this),a(0,s),a(4,c)}return[s,h,r,o,c,_,A,C]}class Na extends Zt{constructor(t){super();ea(this,t,Sa,Ea,ta,{})}}export{Na as default}; diff --git a/spaces/radames/NYTimes-homepage-rearranged/static/_app/start-ad0dbeae.js b/spaces/radames/NYTimes-homepage-rearranged/static/_app/start-ad0dbeae.js deleted file mode 100644 index 8d8fea6df878f397e00ef2da0cef633b4c9c1430..0000000000000000000000000000000000000000 --- a/spaces/radames/NYTimes-homepage-rearranged/static/_app/start-ad0dbeae.js +++ /dev/null @@ -1 +0,0 @@ -var fe=Object.defineProperty,ue=Object.defineProperties;var he=Object.getOwnPropertyDescriptors;var B=Object.getOwnPropertySymbols;var Q=Object.prototype.hasOwnProperty,Z=Object.prototype.propertyIsEnumerable;var H=(o,e,t)=>e in o?fe(o,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):o[e]=t,y=(o,e)=>{for(var t in e||(e={}))Q.call(e,t)&&H(o,t,e[t]);if(B)for(var t of B(e))Z.call(e,t)&&H(o,t,e[t]);return o},K=(o,e)=>ue(o,he(e));var ee=(o,e)=>{var t={};for(var r in o)Q.call(o,r)&&e.indexOf(r)<0&&(t[r]=o[r]);if(o!=null&&B)for(var r of B(o))e.indexOf(r)<0&&Z.call(o,r)&&(t[r]=o[r]);return t};import{S as de,i as _e,s as pe,e as ge,c as me,a as we,d as $,b as z,f as N,g as S,t as be,h as ve,j as ye,k as ke,l as w,m as $e,n as P,o as b,p as x,q as v,r as Ee,u as Re,v as Y,w as L,x as j,y as U,z as V,A as I,B as A,C as D,D as J,E as te}from"./chunks/vendor-528a52ee.js";function Se(o){let e,t,r;const l=[o[1]||{}];var i=o[0][0];function a(s){let n={};for(let c=0;c{A(f,1)}),x()}i?(e=new i(a()),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&A(e,s)}}}function Le(o){let e,t,r;const l=[o[1]||{}];var i=o[0][0];function a(s){let n={$$slots:{default:[Te]},$$scope:{ctx:s}};for(let c=0;c{A(f,1)}),x()}i?(e=new i(a(s)),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&A(e,s)}}}function Ue(o){let e,t,r;const l=[o[2]||{}];var i=o[0][1];function a(s){let n={};for(let c=0;c{A(f,1)}),x()}i?(e=new i(a()),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&A(e,s)}}}function Ae(o){let e,t,r;const l=[o[2]||{}];var i=o[0][1];function a(s){let n={$$slots:{default:[Ne]},$$scope:{ctx:s}};for(let c=0;c{A(f,1)}),x()}i?(e=new i(a(s)),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&A(e,s)}}}function Ne(o){let e,t,r;const l=[o[3]||{}];var i=o[0][2];function a(s){let n={};for(let c=0;c{A(f,1)}),x()}i?(e=new i(a()),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&A(e,s)}}}function Te(o){let e,t,r,l;const i=[Ae,Ue],a=[];function s(n,c){return n[0][2]?0:1}return e=s(o),t=a[e]=i[e](o),{c(){t.c(),r=w()},l(n){t.l(n),r=w()},m(n,c){a[e].m(n,c),S(n,r,c),l=!0},p(n,c){let f=e;e=s(n),e===f?a[e].p(n,c):(P(),b(a[f],1,1,()=>{a[f]=null}),x(),t=a[e],t?t.p(n,c):(t=a[e]=i[e](n),t.c()),v(t,1),t.m(r.parentNode,r))},i(n){l||(v(t),l=!0)},o(n){b(t),l=!1},d(n){a[e].d(n),n&&$(r)}}}function se(o){let e,t=o[5]&&re(o);return{c(){e=ge("div"),t&&t.c(),this.h()},l(r){e=me(r,"DIV",{id:!0,"aria-live":!0,"aria-atomic":!0,style:!0});var 
l=we(e);t&&t.l(l),l.forEach($),this.h()},h(){z(e,"id","svelte-announcer"),z(e,"aria-live","assertive"),z(e,"aria-atomic","true"),N(e,"position","absolute"),N(e,"left","0"),N(e,"top","0"),N(e,"clip","rect(0 0 0 0)"),N(e,"clip-path","inset(50%)"),N(e,"overflow","hidden"),N(e,"white-space","nowrap"),N(e,"width","1px"),N(e,"height","1px")},m(r,l){S(r,e,l),t&&t.m(e,null)},p(r,l){r[5]?t?t.p(r,l):(t=re(r),t.c(),t.m(e,null)):t&&(t.d(1),t=null)},d(r){r&&$(e),t&&t.d()}}}function re(o){let e;return{c(){e=be(o[6])},l(t){e=ve(t,o[6])},m(t,r){S(t,e,r)},p(t,r){r&64&&ye(e,t[6])},d(t){t&&$(e)}}}function Oe(o){let e,t,r,l,i;const a=[Le,Se],s=[];function n(f,h){return f[0][1]?0:1}e=n(o),t=s[e]=a[e](o);let c=o[4]&&se(o);return{c(){t.c(),r=ke(),c&&c.c(),l=w()},l(f){t.l(f),r=$e(f),c&&c.l(f),l=w()},m(f,h){s[e].m(f,h),S(f,r,h),c&&c.m(f,h),S(f,l,h),i=!0},p(f,[h]){let u=e;e=n(f),e===u?s[e].p(f,h):(P(),b(s[u],1,1,()=>{s[u]=null}),x(),t=s[e],t?t.p(f,h):(t=s[e]=a[e](f),t.c()),v(t,1),t.m(r.parentNode,r)),f[4]?c?c.p(f,h):(c=se(f),c.c(),c.m(l.parentNode,l)):c&&(c.d(1),c=null)},i(f){i||(v(t),i=!0)},o(f){b(t),i=!1},d(f){s[e].d(f),f&&$(r),c&&c.d(f),f&&$(l)}}}function Pe(o,e,t){let{stores:r}=e,{page:l}=e,{components:i}=e,{props_0:a=null}=e,{props_1:s=null}=e,{props_2:n=null}=e;Ee("__svelte__",r),Re(r.page.notify);let c=!1,f=!1,h=null;return Y(()=>{const u=r.page.subscribe(()=>{c&&(t(5,f=!0),t(6,h=document.title||"untitled page"))});return t(4,c=!0),u}),o.$$set=u=>{"stores"in u&&t(7,r=u.stores),"page"in u&&t(8,l=u.page),"components"in u&&t(0,i=u.components),"props_0"in u&&t(1,a=u.props_0),"props_1"in u&&t(2,s=u.props_1),"props_2"in u&&t(3,n=u.props_2)},o.$$.update=()=>{o.$$.dirty&384&&r.page.set(l)},[i,a,s,n,c,f,h,r,l]}class xe extends de{constructor(e){super();_e(this,e,Pe,Oe,pe,{stores:7,page:8,components:0,props_0:1,props_1:2,props_2:3})}}const Ce="modulepreload",ie={},je="/static/_app/",G=function(e,t){return!t||t.length===0?e():Promise.all(t.map(r=>{if(r=`${je}${r}`,r in ie)return;ie[r]=!0;const l=r.endsWith(".css"),i=l?'[rel="stylesheet"]':"";if(document.querySelector(`link[href="${r}"]${i}`))return;const a=document.createElement("link");if(a.rel=l?"stylesheet":Ce,l||(a.as="script",a.crossOrigin=""),a.href=r,document.head.appendChild(a),l)return new Promise((s,n)=>{a.addEventListener("load",s),a.addEventListener("error",n)})})).then(()=>e())},C=[()=>G(()=>import("./pages/__layout.svelte-483b60b6.js"),["pages/__layout.svelte-483b60b6.js","assets/pages/__layout.svelte-298a4fd5.css","chunks/vendor-528a52ee.js"]),()=>G(()=>import("./error.svelte-41f1b9b8.js"),["error.svelte-41f1b9b8.js","chunks/vendor-528a52ee.js"]),()=>G(()=>import("./pages/index.svelte-a06f0dfd.js"),["pages/index.svelte-a06f0dfd.js","chunks/vendor-528a52ee.js"])],Ve=[[/^\/$/,[C[0],C[2]],[C[1]]]],Ie=[C[0](),C[1]()];function De(o){let e=o.baseURI;if(!e){const t=o.getElementsByTagName("base");e=t.length?t[0].href:o.URL}return e}let F="";function qe(o){F=o.base,o.assets}function M(){return{x:pageXOffset,y:pageYOffset}}function ne(o){return o.composedPath().find(t=>t instanceof Node&&t.nodeName.toUpperCase()==="A")}function ae(o){return o instanceof SVGAElement?new URL(o.href.baseVal,document.baseURI):new URL(o.href)}class We{constructor({base:e,routes:t,trailing_slash:r,renderer:l}){var i,a;this.base=e,this.routes=t,this.trailing_slash=r,this.navigating=0,this.renderer=l,l.router=this,this.enabled=!0,document.body.setAttribute("tabindex","-1"),this.current_history_index=(a=(i=history.state)==null?void 
0:i["sveltekit:index"])!=null?a:0,this.current_history_index===0&&history.replaceState(K(y({},history.state),{"sveltekit:index":0}),"",location.href),this.callbacks={before_navigate:[],after_navigate:[]}}init_listeners(){"scrollRestoration"in history&&(history.scrollRestoration="manual"),addEventListener("beforeunload",i=>{let a=!1;const s={from:this.renderer.current.url,to:null,cancel:()=>a=!0};this.callbacks.before_navigate.forEach(n=>n(s)),a?(i.preventDefault(),i.returnValue=""):history.scrollRestoration="auto"}),addEventListener("load",()=>{history.scrollRestoration="manual"});let e;addEventListener("scroll",()=>{clearTimeout(e),e=setTimeout(()=>{const i=K(y({},history.state||{}),{"sveltekit:scroll":M()});history.replaceState(i,document.title,window.location.href)},200)});const t=i=>{const a=ne(i);a&&a.href&&a.hasAttribute("sveltekit:prefetch")&&this.prefetch(ae(a))};let r;const l=i=>{clearTimeout(r),r=setTimeout(()=>{var a;(a=i.target)==null||a.dispatchEvent(new CustomEvent("sveltekit:trigger_prefetch",{bubbles:!0}))},20)};addEventListener("touchstart",t),addEventListener("mousemove",l),addEventListener("sveltekit:trigger_prefetch",t),addEventListener("click",i=>{if(!this.enabled||i.button||i.which!==1||i.metaKey||i.ctrlKey||i.shiftKey||i.altKey||i.defaultPrevented)return;const a=ne(i);if(!a||!a.href)return;const s=ae(a);if(s.toString()===location.href){location.hash||i.preventDefault();return}const c=(a.getAttribute("rel")||"").split(/\s+/);if(a.hasAttribute("download")||c&&c.includes("external")||(a instanceof SVGAElement?a.target.baseVal:a.target))return;const[f,h]=s.href.split("#");if(h!==void 0&&f===location.href.split("#")[0]){setTimeout(()=>history.pushState({},"",s.href));const u=this.parse(s);return u?this.renderer.update(u,[],!1):void 0}this._navigate({url:s,scroll:a.hasAttribute("sveltekit:noscroll")?M():null,keepfocus:!1,chain:[],details:{state:{},replaceState:!1},accepted:()=>i.preventDefault(),blocked:()=>i.preventDefault()})}),addEventListener("popstate",i=>{if(i.state&&this.enabled){if(i.state["sveltekit:index"]===this.current_history_index)return;this._navigate({url:new URL(location.href),scroll:i.state["sveltekit:scroll"],keepfocus:!1,chain:[],details:null,accepted:()=>{this.current_history_index=i.state["sveltekit:index"]},blocked:()=>{const a=this.current_history_index-i.state["sveltekit:index"];history.go(a)}})}})}owns(e){return e.origin===location.origin&&e.pathname.startsWith(this.base)}parse(e){if(this.owns(e)){const t=decodeURI(e.pathname.slice(this.base.length)||"/");return{id:e.pathname+e.search,routes:this.routes.filter(([r])=>r.test(t)),url:e,path:t}}}async goto(e,{noscroll:t=!1,replaceState:r=!1,keepfocus:l=!1,state:i={}}={},a){const s=new URL(e,De(document));return this.enabled?this._navigate({url:s,scroll:t?M():null,keepfocus:l,chain:a,details:{state:i,replaceState:r},accepted:()=>{},blocked:()=>{}}):(location.href=s.href,new Promise(()=>{}))}enable(){this.enabled=!0}disable(){this.enabled=!1}async prefetch(e){const t=this.parse(e);if(!t)throw new Error("Attempted to prefetch a URL that does not belong to this app");return this.renderer.load(t)}after_navigate(e){Y(()=>(this.callbacks.after_navigate.push(e),()=>{const t=this.callbacks.after_navigate.indexOf(e);this.callbacks.after_navigate.splice(t,1)}))}before_navigate(e){Y(()=>(this.callbacks.before_navigate.push(e),()=>{const t=this.callbacks.before_navigate.indexOf(e);this.callbacks.before_navigate.splice(t,1)}))}async 
_navigate({url:e,scroll:t,keepfocus:r,chain:l,details:i,accepted:a,blocked:s}){const n=this.renderer.current.url;let c=!1;const f={from:n,to:e,cancel:()=>c=!0};if(this.callbacks.before_navigate.forEach(d=>d(f)),c){s();return}const h=this.parse(e);if(!h)return location.href=e.href,new Promise(()=>{});a(),this.navigating||dispatchEvent(new CustomEvent("sveltekit:navigation-start")),this.navigating++;let{pathname:u}=e;if(this.trailing_slash==="never"?u!=="/"&&u.endsWith("/")&&(u=u.slice(0,-1)):this.trailing_slash==="always"&&!e.pathname.split("/").pop().includes(".")&&!u.endsWith("/")&&(u+="/"),h.url=new URL(e.origin+u+e.search+e.hash),i){const d=i.replaceState?0:1;i.state["sveltekit:index"]=this.current_history_index+=d,history[i.replaceState?"replaceState":"pushState"](i.state,"",h.url)}if(await this.renderer.handle_navigation(h,l,!1,{scroll:t,keepfocus:r}),this.navigating--,!this.navigating){dispatchEvent(new CustomEvent("sveltekit:navigation-end"));const d={from:n,to:e};this.callbacks.after_navigate.forEach(_=>_(d))}}}function oe(o){return o instanceof Error||o&&o.name&&o.message?o:new Error(JSON.stringify(o))}function Be(o){let e=5381,t=o.length;if(typeof o=="string")for(;t;)e=e*33^o.charCodeAt(--t);else for(;t;)e=e*33^o[--t];return(e>>>0).toString(36)}function le(o){const e=o.status&&o.status>=400&&o.status<=599&&!o.redirect;if(o.error||e){const t=o.status;if(!o.error&&e)return{status:t||500,error:new Error};const r=typeof o.error=="string"?new Error(o.error):o.error;return r instanceof Error?!t||t<400||t>599?(console.warn('"error" returned from load() without a valid status code \u2014 defaulting to 500'),{status:500,error:r}):{status:t,error:r}:{status:500,error:new Error(`"error" property returned from load() must be a string or instance of Error, received type "${typeof r}"`)}}if(o.redirect){if(!o.status||Math.floor(o.status/100)!==3)return{status:500,error:new Error('"redirect" property returned from load() must be accompanied by a 3xx status code')};if(typeof o.redirect!="string")return{status:500,error:new Error('"redirect" property returned from load() must be a string')}}if(o.context)throw new Error('You are returning "context" from a load function. 
"context" was renamed to "stuff", please adjust your code accordingly.');return o}function ce(o){const e=J(o);let t=!0;function r(){t=!0,e.update(a=>a)}function l(a){t=!1,e.set(a)}function i(a){let s;return e.subscribe(n=>{(s===void 0||t&&n!==s)&&a(s=n)})}return{notify:r,set:l,subscribe:i}}function Je(){const{set:o,subscribe:e}=J(!1),t="1666722492919";let r;async function l(){clearTimeout(r);const a=await fetch(`${F}/_app/version.json`,{headers:{pragma:"no-cache","cache-control":"no-cache"}});if(a.ok){const{version:s}=await a.json(),n=s!==t;return n&&(o(!0),clearTimeout(r)),n}else throw new Error(`Version check failed: ${a.status}`)}return{subscribe:e,check:l}}function Ke(o,e){const t=typeof o=="string"?o:o.url;let r=`script[data-type="svelte-data"][data-url=${JSON.stringify(t)}]`;e&&typeof e.body=="string"&&(r+=`[data-body="${Be(e.body)}"]`);const l=document.querySelector(r);if(l&&l.textContent){const i=JSON.parse(l.textContent),{body:a}=i,s=ee(i,["body"]);return Promise.resolve(new Response(a,s))}return fetch(o,e)}class ze{constructor({Root:e,fallback:t,target:r,session:l}){this.Root=e,this.fallback=t,this.router,this.target=r,this.started=!1,this.session_id=1,this.invalid=new Set,this.invalidating=null,this.autoscroll=!0,this.updating=!1,this.current={url:null,session_id:0,branch:[]},this.cache=new Map,this.loading={id:null,promise:null},this.stores={url:ce({}),page:ce({}),navigating:J(null),session:J(l),updated:Je()},this.$session=null,this.root=null;let i=!1;this.stores.session.subscribe(async a=>{if(this.$session=a,!i||!this.router)return;this.session_id+=1;const s=this.router.parse(new URL(location.href));s&&this.update(s,[],!0)}),i=!0}disable_scroll_handling(){(this.updating||!this.started)&&(this.autoscroll=!1)}async start({status:e,error:t,nodes:r,url:l,params:i}){const a=[];let s={},n,c;l.hash=window.location.hash;try{for(let f=0;f10||t.includes(e.url.pathname))a=await this._load_error({status:500,error:new Error("Redirect loop"),url:e.url});else{this.router?this.router.goto(new URL(a.redirect,e.url).href,{replaceState:!0},[...t,e.url.pathname]):location.href=new URL(a.redirect,location.href).href;return}else if(((c=(n=a.props)==null?void 0:n.page)==null?void 0:c.status)>=400&&await this.stores.updated.check()){location.href=e.url.href;return}if(this.updating=!0,this.started?(this.current=a.state,this.root.$set(a.props),this.stores.navigating.set(null)):this._init(a),l){const{scroll:h,keepfocus:u}=l;if(u||((f=getSelection())==null||f.removeAllRanges(),document.body.focus()),await te(),this.autoscroll){const d=e.url.hash&&document.getElementById(e.url.hash.slice(1));h?scrollTo(h.x,h.y):d?d.scrollIntoView():scrollTo(0,0)}}else await te();if(this.loading.promise=null,this.loading.id=null,this.autoscroll=!0,this.updating=!1,!this.router)return;const s=a.state.branch[a.state.branch.length-1];s&&s.module.router===!1?this.router.disable():this.router.enable()}load(e){return this.loading.promise=this._get_navigation_result(e,!1),this.loading.id=e.id,this.loading.promise}invalidate(e){return this.invalid.add(e),this.invalidating||(this.invalidating=Promise.resolve().then(async()=>{const t=this.router&&this.router.parse(new URL(location.href));t&&await this.update(t,[],!0),this.invalidating=null})),this.invalidating}_init(e){this.current=e.state;const t=document.querySelector("style[data-svelte]");if(t&&t.remove(),this.root=new this.Root({target:this.target,props:y({stores:this.stores},e.props),hydrate:!0}),this.started=!0,this.router){const r={from:null,to:new 
URL(location.href)};this.router.callbacks.after_navigate.forEach(l=>l(r))}}async _get_navigation_result(e,t){if(this.loading.id===e.id&&this.loading.promise)return this.loading.promise;for(let r=0;rn()),i+=1;else break}const a=await this._load({route:l,info:e},t);if(a)return a}return await this._load_error({status:404,error:new Error(`Not found: ${e.url.pathname}`),url:e.url})}async _get_navigation_result_from_branch({url:e,params:t,stuff:r,branch:l,status:i,error:a}){const s=l.filter(Boolean),n=s.find(u=>u.loaded&&u.loaded.redirect),c={redirect:n&&n.loaded?n.loaded.redirect:void 0,state:{url:e,params:t,branch:l,session_id:this.session_id},props:{components:s.map(u=>u.module.default)}};for(let u=0;u{Object.defineProperty(c.props.page,d,{get:()=>{throw new Error(`$page.${d} has been replaced by $page.url.${_}`)}})};u("origin","origin"),u("path","pathname"),u("query","searchParams")}const f=s[s.length-1],h=f.loaded&&f.loaded.maxage;if(h){const u=e.pathname+e.search;let d=!1;const _=()=>{this.cache.get(u)===c&&this.cache.delete(u),E(),clearTimeout(T)},T=setTimeout(_,h*1e3),E=this.stores.session.subscribe(()=>{d&&_()});d=!0,this.cache.set(u,c)}return c}async _load_node({status:e,error:t,module:r,url:l,params:i,stuff:a,props:s}){const n={module:r,uses:{params:new Set,url:!1,session:!1,stuff:!1,dependencies:new Set},loaded:null,stuff:a};s&&n.uses.dependencies.add(l.href);const c={};for(const h in i)Object.defineProperty(c,h,{get(){return n.uses.params.add(h),i[h]},enumerable:!0});const f=this.$session;if(r.load){const{started:h}=this,u={params:c,props:s||{},get url(){return n.uses.url=!0,l},get session(){return n.uses.session=!0,f},get stuff(){return n.uses.stuff=!0,y({},a)},fetch(_,T){const E=typeof _=="string"?_:_.url,{href:R}=new URL(E,l);return n.uses.dependencies.add(R),h?fetch(_,T):Ke(_,T)}};t&&(u.status=e,u.error=t);const d=await r.load.call(null,u);if(!d)throw new Error("load function must return a value");n.loaded=le(d),n.loaded.stuff&&(n.stuff=n.loaded.stuff)}else s&&(n.loaded=le({props:s}));return n}async _load({route:e,info:{url:t,path:r}},l){const i=t.pathname+t.search;if(!l){const p=this.cache.get(i);if(p)return p}const[a,s,n,c,f]=e,h=c?c(a.exec(r)):{},u=this.current.url&&{url:i!==this.current.url.pathname+this.current.url.search,params:Object.keys(h).filter(p=>this.current.params[p]!==h[p]),session:this.session_id!==this.current.session_id};let d=[],_={},T=!1,E=200,R;s.forEach(p=>p());e:for(let p=0;pk.uses.params.has(O))||u.session&&k.uses.session||Array.from(k.uses.dependencies).some(O=>this.invalid.has(O))||T&&k.uses.stuff){let O={};if(f&&p===s.length-1){const W=await fetch(`${t.pathname}${t.pathname.endsWith("/")?"":"/"}__data.json`,{headers:{"x-sveltekit-noredirect":"true"}});if(W.ok){const X=W.headers.get("x-sveltekit-location");if(X)return{redirect:X,props:{},state:this.current};O=await W.json()}else E=W.status,R=new Error("Failed to load data")}if(R||(g=await this._load_node({module:m,url:t,params:h,props:O,stuff:_})),g&&g.loaded){if(g.loaded.fallthrough)return;if(g.loaded.error&&(E=g.loaded.status,R=g.loaded.error),g.loaded.redirect)return{redirect:g.loaded.redirect,props:{},state:this.current};g.loaded.stuff&&(T=!0)}}else g=k}catch(m){E=500,R=oe(m)}if(R){for(;p--;)if(n[p]){let m,k,q=p;for(;!(k=d[q]);)q-=1;try{if(m=await this._load_node({status:E,error:R,module:await n[p](),url:t,params:h,stuff:k.stuff}),m&&m.loaded&&m.loaded.error)continue;m&&m.loaded&&m.loaded.stuff&&(_=y(y({},_),m.loaded.stuff)),d=d.slice(0,q+1).concat(m);break e}catch{continue}}return await 
this._load_error({status:E,error:R,url:t})}else g&&g.loaded&&g.loaded.stuff&&(_=y(y({},_),g.loaded.stuff)),d.push(g)}return await this._get_navigation_result_from_branch({url:t,params:h,stuff:_,branch:d,status:E,error:R})}async _load_error({status:e,error:t,url:r}){var c,f;const l={},i=await this._load_node({module:await this.fallback[0],url:r,params:l,stuff:{}}),a=await this._load_node({status:e,error:t,module:await this.fallback[1],url:r,params:l,stuff:i&&i.loaded&&i.loaded.stuff||{}}),s=[i,a],n=y(y({},(c=i==null?void 0:i.loaded)==null?void 0:c.stuff),(f=a==null?void 0:a.loaded)==null?void 0:f.stuff);return await this._get_navigation_result_from_branch({url:r,params:l,stuff:n,branch:s,status:e,error:t})}}async function Ye({paths:o,target:e,session:t,route:r,spa:l,trailing_slash:i,hydrate:a}){const s=new ze({Root:xe,fallback:Ie,target:e,session:t}),n=r?new We({base:o.base,routes:Ve,trailing_slash:i,renderer:s}):null;qe(o),a&&await s.start(a),n&&(l&&n.goto(location.href,{replaceState:!0},[]),n.init_listeners()),dispatchEvent(new CustomEvent("sveltekit:start"))}export{Ye as start}; diff --git a/spaces/radames/UserControllableLT-Latent-Transformer/interface/pixel2style2pixel/models/mtcnn/mtcnn_pytorch/src/first_stage.py b/spaces/radames/UserControllableLT-Latent-Transformer/interface/pixel2style2pixel/models/mtcnn/mtcnn_pytorch/src/first_stage.py deleted file mode 100644 index d646f91d5e0348e23bd426701f6afa6000a9b6d1..0000000000000000000000000000000000000000 --- a/spaces/radames/UserControllableLT-Latent-Transformer/interface/pixel2style2pixel/models/mtcnn/mtcnn_pytorch/src/first_stage.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -from torch.autograd import Variable -import math -from PIL import Image -import numpy as np -from .box_utils import nms, _preprocess - -# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -device = 'cuda:0' - - -def run_first_stage(image, net, scale, threshold): - """Run P-Net, generate bounding boxes, and do NMS. - - Arguments: - image: an instance of PIL.Image. - net: an instance of pytorch's nn.Module, P-Net. - scale: a float number, - scale width and height of the image by this number. - threshold: a float number, - threshold on the probability of a face when generating - bounding boxes from predictions of the net. - - Returns: - a float numpy array of shape [n_boxes, 9], - bounding boxes with scores and offsets (4 + 1 + 4). - """ - - # scale the image and convert it to a float array - width, height = image.size - sw, sh = math.ceil(width * scale), math.ceil(height * scale) - img = image.resize((sw, sh), Image.BILINEAR) - img = np.asarray(img, 'float32') - - img = torch.FloatTensor(_preprocess(img)).to(device) - with torch.no_grad(): - output = net(img) - probs = output[1].cpu().data.numpy()[0, 1, :, :] - offsets = output[0].cpu().data.numpy() - # probs: probability of a face at each sliding window - # offsets: transformations to true bounding boxes - - boxes = _generate_bboxes(probs, offsets, scale, threshold) - if len(boxes) == 0: - return None - - keep = nms(boxes[:, 0:5], overlap_threshold=0.5) - return boxes[keep] - - -def _generate_bboxes(probs, offsets, scale, threshold): - """Generate bounding boxes at places - where there is probably a face. - - Arguments: - probs: a float numpy array of shape [n, m]. - offsets: a float numpy array of shape [1, 4, n, m]. - scale: a float number, - width and height of the image were scaled by this number. - threshold: a float number. 
- - Returns: - a float numpy array of shape [n_boxes, 9] - """ - - # applying P-Net is equivalent, in some sense, to - # moving 12x12 window with stride 2 - stride = 2 - cell_size = 12 - - # indices of boxes where there is probably a face - inds = np.where(probs > threshold) - - if inds[0].size == 0: - return np.array([]) - - # transformations of bounding boxes - tx1, ty1, tx2, ty2 = [offsets[0, i, inds[0], inds[1]] for i in range(4)] - # they are defined as: - # w = x2 - x1 + 1 - # h = y2 - y1 + 1 - # x1_true = x1 + tx1*w - # x2_true = x2 + tx2*w - # y1_true = y1 + ty1*h - # y2_true = y2 + ty2*h - - offsets = np.array([tx1, ty1, tx2, ty2]) - score = probs[inds[0], inds[1]] - - # P-Net is applied to scaled images - # so we need to rescale bounding boxes back - bounding_boxes = np.vstack([ - np.round((stride * inds[1] + 1.0) / scale), - np.round((stride * inds[0] + 1.0) / scale), - np.round((stride * inds[1] + 1.0 + cell_size) / scale), - np.round((stride * inds[0] + 1.0 + cell_size) / scale), - score, offsets - ]) - # why one is added? - - return bounding_boxes.T diff --git a/spaces/radames/UserControllableLT-Latent-Transformer/models/StyleGANControler.py b/spaces/radames/UserControllableLT-Latent-Transformer/models/StyleGANControler.py deleted file mode 100644 index 92941ac5714612fcb2e1deac3a4d4366aea45304..0000000000000000000000000000000000000000 --- a/spaces/radames/UserControllableLT-Latent-Transformer/models/StyleGANControler.py +++ /dev/null @@ -1,73 +0,0 @@ -import torch -from torch import nn -from models.networks import latent_transformer -from models.stylegan2.model import Generator -import numpy as np - -def get_keys(d, name): - if 'state_dict' in d: - d = d['state_dict'] - d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name} - return d_filt - - -class StyleGANControler(nn.Module): - - def __init__(self, opts): - super(StyleGANControler, self).__init__() - self.set_opts(opts) - # Define architecture - - if 'ffhq' in self.opts.stylegan_weights: - self.style_num = 18 - elif 'car' in self.opts.stylegan_weights: - self.style_num = 16 - elif 'cat' in self.opts.stylegan_weights: - self.style_num = 14 - elif 'church' in self.opts.stylegan_weights: - self.style_num = 14 - elif 'anime' in self.opts.stylegan_weights: - self.style_num = 16 - else: - self.style_num = 18 #Please modify to adjust network architecture to your pre-trained StyleGAN2 - - self.encoder = self.set_encoder() - if self.style_num==18: - self.decoder = Generator(1024, 512, 8, channel_multiplier=2) - elif self.style_num==16: - self.decoder = Generator(512, 512, 8, channel_multiplier=2) - elif self.style_num==14: - self.decoder = Generator(256, 512, 8, channel_multiplier=2) - - self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256)) - - # Load weights if needed - self.load_weights() - - def set_encoder(self): - encoder = latent_transformer.Network(self.opts) - return encoder - - def load_weights(self): - if self.opts.checkpoint_path is not None: - print('Loading from checkpoint: {}'.format(self.opts.checkpoint_path)) - ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu') - self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True) - self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True) - self.__load_latent_avg(ckpt) - else: - print('Loading decoder weights from pretrained!') - ckpt = torch.load(self.opts.stylegan_weights) - self.decoder.load_state_dict(ckpt['g_ema'], strict=True) - self.__load_latent_avg(ckpt, repeat=self.opts.style_num) - - def set_opts(self, 
opts): - self.opts = opts - - def __load_latent_avg(self, ckpt, repeat=None): - if 'latent_avg' in ckpt: - self.latent_avg = ckpt['latent_avg'].to(self.opts.device) - if repeat is not None: - self.latent_avg = self.latent_avg.repeat(repeat, 1) - else: - self.latent_avg = None diff --git a/spaces/radames/transformers-js-sveltekit-server-example-app/postcss.config.js b/spaces/radames/transformers-js-sveltekit-server-example-app/postcss.config.js deleted file mode 100644 index 2e7af2b7f1a6f391da1631d93968a9d487ba977d..0000000000000000000000000000000000000000 --- a/spaces/radames/transformers-js-sveltekit-server-example-app/postcss.config.js +++ /dev/null @@ -1,6 +0,0 @@ -export default { - plugins: { - tailwindcss: {}, - autoprefixer: {}, - }, -} diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Ayaka - Arigatou No Wa (mp3.pm).mp3.md b/spaces/raedeXanto/academic-chatgpt-beta/Ayaka - Arigatou No Wa (mp3.pm).mp3.md deleted file mode 100644 index 08b5a283a3122dde9657de5061ec6c2b6fa5e7ad..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Ayaka - Arigatou No Wa (mp3.pm).mp3.md +++ /dev/null @@ -1,15 +0,0 @@ -
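The `_generate_bboxes` helper in the deleted first_stage.py above leaves its own marginal question unanswered (`# why one is added?`). As a reference, here is a minimal self-contained NumPy sketch of the same grid-to-image coordinate mapping; the function name and toy values are illustrative only and are not part of the original repository. The `+ 1.0` offset most plausibly carries over the 1-based pixel coordinates of the original MATLAB MTCNN implementation, though the deleted source itself does not say.

```python
import numpy as np

def grid_to_boxes(probs, scale, threshold, stride=2, cell_size=12):
    """Map P-Net output cells above `threshold` back to boxes on the
    original image. Cell (row, col) corresponds to a 12x12 window whose
    top-left corner sits at (stride*col, stride*row) in the *scaled*
    image; dividing by `scale` undoes the image-pyramid resizing."""
    rows, cols = np.where(probs > threshold)
    if rows.size == 0:
        return np.empty((0, 5), dtype=np.float32)
    x1 = np.round((stride * cols + 1.0) / scale)  # the +1.0 mirrors first_stage.py
    y1 = np.round((stride * rows + 1.0) / scale)
    x2 = np.round((stride * cols + 1.0 + cell_size) / scale)
    y2 = np.round((stride * rows + 1.0 + cell_size) / scale)
    scores = probs[rows, cols]
    return np.stack([x1, y1, x2, y2, scores], axis=1)

# Toy probability map with one confident cell at grid position (3, 5).
probs = np.zeros((10, 10), dtype=np.float32)
probs[3, 5] = 0.9
print(grid_to_boxes(probs, scale=0.5, threshold=0.6))
# One box of roughly 24x24 px in original coordinates: the 12x12
# window is scaled back up by 1/scale = 2.
```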
            -

            Ayaka's Arigatou No Wa: A Song of Gratitude and Love

            -

            Ayaka is a Japanese singer-songwriter who debuted in 2006 with her hit single "I Believe". She is known for her powerful vocals and heartfelt lyrics that touch the listeners' emotions. One of her most popular songs is "Arigatou No Wa" (The Ring of Thank You), which was released in 2015 as part of her fourth album "Rainbow Road".

            -

            "Arigatou No Wa" is a song that expresses Ayaka's gratitude and love for her family, friends, and fans. She sings about how their smiles give her courage and how their words connect her to the light. She also apologizes for the times she hurt them and thanks them for always being by her side. The song is a tribute to the people who supported her through her personal struggles, such as her marriage, divorce, and illness.

            -

            Ayaka - Arigatou No Wa (mp3.pm).mp3


            Download ★★★★★ https://tinourl.com/2uL1Ur



            -

            The song has a catchy melody and a simple chorus that repeats the phrase "Arigatou Arigatou" (Thank You Thank You). The lyrics are written in a conversational tone that makes the song feel intimate and sincere. The song also features a piano accompaniment that adds to the warmth and emotion of the song.

            -

            "Arigatou No Wa" is a song that resonates with many people who want to express their appreciation and affection for their loved ones. It is a song that reminds us of the importance of saying thank you and showing our feelings. It is a song that celebrates the bonds that make us stronger and happier.

            -

            If you want to listen to "Arigatou No Wa" by Ayaka, you can find it on various music streaming platforms, such as Spotify, Apple Music, and SoundCloud. You can also watch the official music video on YouTube, which shows Ayaka singing in a field of flowers with a smile on her face.

            - -

            Ayaka's music career has been marked by both success and challenges. She won several awards and accolades for her debut album "First Message", which sold over one million copies in Japan. She also collaborated with other artists, such as Kobukuro, Daichi Miura, and Kiyoshi Matsuo. However, in 2009, she announced that she had Graves' disease, an autoimmune disorder that affects the thyroid gland. She decided to take a hiatus from music to focus on her health and treatment.

            -

During her hiatus, Ayaka married actor Hiro Mizushima in 2009 and gave birth to their daughter in 2015. She divorced him in 2018 after nine years of marriage. She resumed her music activities in 2012 with the release of her third album "The Beginning", which reflected her personal growth and experiences. She also founded her own independent label, A Station, to have more creative freedom and control over her music.

            -

            Ayaka's latest album, "30 y/o", was released in 2018 to commemorate her 30th birthday. The album contains 13 songs that showcase Ayaka's maturity and versatility as an artist. She explores different genres and themes, such as pop, rock, ballad, dance, and soul. She also collaborates with other artists, such as Daichi Miura, KREVA, and AI. The album received positive reviews from critics and fans alike, who praised Ayaka's vocal skills and musical diversity.

            -

            cec2833e83
            -
            -
            \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Doraemon Hindi Full Movie Youtube TOP.md b/spaces/raedeXanto/academic-chatgpt-beta/Doraemon Hindi Full Movie Youtube TOP.md deleted file mode 100644 index 1081e15a63f2f4f1996b49e4200bca1ea5eb3c32..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Doraemon Hindi Full Movie Youtube TOP.md +++ /dev/null @@ -1,22 +0,0 @@ -
            -

            How to Watch Doraemon Hindi Full Movie on YouTube

            -

            Doraemon is a popular Japanese manga and anime series that follows the adventures of a robotic cat from the future who helps a young boy named Nobita with his gadgets. Doraemon has been dubbed in many languages, including Hindi, and has spawned several movies based on the original stories.

            -

            doraemon hindi full movie youtube


            Download Ziphttps://tinourl.com/2uL0tG



            -

            If you are a fan of Doraemon and want to watch his movies in Hindi, you might be wondering how to find them on YouTube. YouTube is one of the most popular platforms for streaming videos online, and it offers a variety of content for different audiences. However, not all videos are available in every region, and some might be blocked or removed due to copyright issues.

            -

            In this article, we will show you how to watch Doraemon Hindi full movie on YouTube, using some tips and tricks that might help you access the content you want. We will also recommend some of the best Doraemon movies in Hindi that you can watch on YouTube right now.

            - -

            How to Watch Doraemon Hindi Full Movie on YouTube: Tips and Tricks

            -

            Before we get into the specific movies, let's go over some general tips and tricks that might help you watch Doraemon Hindi full movie on YouTube.

            -

            -
              -
• Use a VPN: A VPN (virtual private network) is a service that allows you to change your IP address and location, making it seem like you are browsing from another country. This can help you bypass geo-restrictions and access videos that are not available in your region. For example, if you want to watch Doraemon movies in Hindi that are only available in India, you can use a VPN to connect to an Indian server and access them. However, be careful when choosing a VPN, as some might not work well with YouTube or might compromise your security and privacy. We recommend using a reputable and reliable VPN service that has good reviews and ratings.
• Use a proxy: A proxy is similar to a VPN, but it only changes your IP address and not your encryption or security. This means that it might be faster than a VPN, but also less secure and reliable. A proxy can also help you access videos that are blocked or restricted in your region, but it might not work with all websites or platforms. You can use a web proxy or a browser extension to change your IP address and location.
• Use keywords: Keywords are words or phrases that describe the content of a video or a webpage. They help users find what they are looking for on search engines or platforms like YouTube. If you want to watch Doraemon Hindi full movie on YouTube, you can use keywords like "doraemon hindi full movie", "doraemon movie in hindi", "doraemon hindi movie youtube", etc. You can also add the name of the specific movie or the year of release to narrow down your search results.
• Use filters: Filters are options that allow you to sort and refine your search results on YouTube. You can use filters like duration, upload date, type, quality, features, etc. to find the videos that match your preferences. For example, if you want to watch Doraemon Hindi full movie on YouTube, you can use filters like "long" (for videos longer than 20 minutes), "movie" (for videos categorized as movies), "HD" (for high-definition videos), etc.
• Use playlists: Playlists are collections of videos that are grouped together by a user or a channel. They can help you find and watch related videos without having to search for them individually. For example, if you want to watch Doraemon Hindi full movie on YouTube, you can look for playlists that contain Doraemon movies in Hindi, such as this one.
            - -

            How to Watch Doraemon Hindi Full Movie on YouTube: Best Movies

            -

Now that we have covered some tips and tricks that might help you watch Doraemon Hindi full movie on YouTube, let's move on to some of the best Doraemon movies in Hindi that you can find there.

            7b8c122e87
            -
            -
            \ No newline at end of file diff --git a/spaces/ramdane/search_jurist/app.py b/spaces/ramdane/search_jurist/app.py deleted file mode 100644 index 9df0cf1fb6a55fe4465279f91b895cade742a0a1..0000000000000000000000000000000000000000 --- a/spaces/ramdane/search_jurist/app.py +++ /dev/null @@ -1,27 +0,0 @@ - -import pickle -import os -print(os.getcwd()) -fileobj=open("/home/user/app/embmmn5.obj","rb") -corpus_embeddings,corpus=pickle.load(fileobj) -fileobj.close() -from sentence_transformers import SentenceTransformer, util -import torch - -embedder = SentenceTransformer('ramdane/jurimodel') -embedder.max_seq_length=510 -def showr(queries,number): - query_embedding = embedder.encode(queries, convert_to_tensor=True) - hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=10) - hits = hits[0] #Get the hits for the first query - if(hits[number]['score']>0.05): - return corpus[hits[number]['corpus_id'] ] - else: - return "لم نتمكن من ايجاد النتيجة اما لعدم وجود الاجتهاد او لعدم كتابة جملة بحث مناسبة " -import gradio as gr - -def greet(search_for,number): - return showr(search_for,int(number)) - -iface = gr.Interface(fn=greet, inputs=[gr.Textbox(label="ادخل كلمات البحث"),gr.Number(label="الترتيب")], outputs=gr.TextArea(label="الاجتهاد")) -iface.launch() \ No newline at end of file diff --git a/spaces/raoyang111/img-to-music/README.md b/spaces/raoyang111/img-to-music/README.md deleted file mode 100644 index f7e2487cd42d65ff44a707eef14ab7ed4fd23f01..0000000000000000000000000000000000000000 --- a/spaces/raoyang111/img-to-music/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Img To Music -emoji: 🌅🎶 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.20.0 -app_file: app.py -pinned: true -duplicated_from: fffiloni/img-to-music ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Active BootDisk Suite V9.1.0.1.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Active BootDisk Suite V9.1.0.1.md deleted file mode 100644 index 68a96006044190d080ebf2ada8fcc628d55dfff2..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Active BootDisk Suite V9.1.0.1.md +++ /dev/null @@ -1,167 +0,0 @@ - -

            Active@ BootDisk Suite v9.1.0.1: A Powerful Tool for PC Maintenance

            - -

            Have you ever encountered a situation where your PC won't boot up normally, or you need to access your data from a damaged or inaccessible disk? If so, you might have wished for a reliable and easy-to-use solution that can help you recover your data, repair your system, or erase your sensitive information securely. That's where Active@ BootDisk Suite v9.1.0.1 comes in handy.

            -

            Active@ BootDisk Suite v9.1.0.1


            Download Zip ::: https://urlgoal.com/2uCLMW



            - -

            Active@ BootDisk Suite v9.1.0.1 is a complete and functioning computer operating system on CD/DVD/USB disk that does not modify the operating system already installed on your hard drive. It allows you to boot up your PC from any external media and access your local disks and system resources.

            - -

            What Can You Do with Active@ BootDisk Suite v9.1.0.1?

            - -

            Active@ BootDisk Suite v9.1.0.1 is more than just a bootable disk. It comes with a collection of powerful utilities that can help you perform various tasks, such as:

            - -
              -
• Data recovery: You can use the recovery utilities to recover deleted files, or recover data from deleted or damaged partitions.
• Disk backup and restore: You can use the disk image tool to create and restore backups of your entire disk or selected partitions.
• Windows password reset: You can use the password resetting tool to reset Windows user passwords, including the administrator account.
• Data erasure: You can use the data sanitizing tool to erase and wipe free space on your disks, securely deleting any traces of your data.
• Partition management: You can use the partition manager to create, delete, format, resize, or move partitions on your disks.
            - -

            What Are the Benefits of Active@ BootDisk Suite v9.1.0.1?

            - -

            Active@ BootDisk Suite v9.1.0.1 offers many benefits for PC users who need a reliable and versatile tool for PC maintenance, such as:

            - -
              -
• Full access to non-bootable PC: You can start your PC from any external media and get exclusive access to your local disks and system resources.
• Network access: You can connect to the internet or a local network via TCP/IP, and use the built-in web browser, FTP client, or mail sender.
• Removable boot disk media: You can remove the boot disk media from the system after successfully booting up your PC.
• Multi-boot support: You can choose between DOS or Windows operating systems when booting up your PC.
• Disk health checking and monitoring: You can use the disk monitoring utility to monitor hard disk temperature and disk S.M.A.R.T. attributes.
• Localization support: You can add your local keyboard layout and type in using your native language.
            - -

            How to Get Active@ BootDisk Suite v9.1.0.1?

            - -

            If you are interested in trying out Active@ BootDisk Suite v9.1.0.1, you can download a freeware evaluation version from here. The evaluation version has some limitations, such as not being able to save files larger than 64KB or create images larger than 300MB.

            - -

            If you want to unlock all the features and benefits of Active@ BootDisk Suite v9.1.0.1, you can buy a full version from here. The full version costs $99.99 for personal use or $149 for business use.

            -

            - -

            Active@ BootDisk Suite v9.1.0.1 is compatible with Windows 11/10/8/7/Vista/XP/Server 2016/2012/2008/2003 operating systems.

            - -

            Conclusion

            - -

            Active@ BootDisk Suite v9.1.0.1 is a powerful and versatile tool that can help you solve many PC problems and perform various tasks related to data recovery, system repair, data erasure, partition management, and more.

            - -

            If you are looking for a reliable and easy-to-use solution that can help you access your data and repair your PC from any external media, you should give Active@ BootDisk Suite v9.1.0.1 a try.

            -

            How to Use Active@ BootDisk Suite v9.1.0.1?

            - -

            Using Active@ BootDisk Suite v9.1.0.1 is very simple and straightforward. You just need to follow these steps:

            - -
              -
1. Download the ISO image file of Active@ BootDisk Suite v9.1.0.1 from here.
2. Burn the ISO image file to a CD/DVD or USB flash drive using any burning software (a command-line sketch follows below).
3. Insert the bootable media into your PC and restart it.
4. Select the boot option from the BIOS menu that corresponds to your bootable media.
5. Wait for Active@ BootDisk Suite v9.1.0.1 to load and choose between DOS or Windows operating systems.
6. Select the utility you want to use from the main menu and follow the instructions on the screen.
            - -

            That's it! You can now use Active@ BootDisk Suite v9.1.0.1 to perform various tasks on your PC without affecting your installed operating system.

            - -

            What Are the System Requirements for Active@ BootDisk Suite v9.1.0.1?

            - -

            Active@ BootDisk Suite v9.1.0.1 has very low system requirements and can run on almost any PC that supports booting from CD/DVD or USB flash drive. Here are the minimum system requirements for Active@ BootDisk Suite v9.1.0.1:

            - -
              -
• Processor: Intel Pentium III or higher
• Memory: 512 MB RAM or higher
• Disk space: 600 MB free disk space for installation
• CD/DVD drive or USB port for bootable media
            - -

            Active@ BootDisk Suite v9.1.0.1 is compatible with Windows 11/10/8/7/Vista/XP/Server 2016/2012/2008/2003 operating systems.

            -

            What Are the Customer Reviews of Active@ BootDisk Suite v9.1.0.1?

            - -

            Active@ BootDisk Suite v9.1.0.1 has received many positive reviews from customers who have used it for various purposes. Here are some of the testimonials from satisfied users:

            - -
            -

            "I had a hard drive crash and I thought I lost all my data. I tried several data recovery software but none of them worked. Then I found Active@ BootDisk Suite v9.1.0.1 and it saved my life. It was able to recover all my files and folders from the damaged disk. It was very easy to use and fast. I highly recommend it to anyone who needs data recovery." - John Smith

            -
            - -
            -

            "I forgot my Windows password and I couldn't log in to my PC. I searched online for a solution and I came across Active@ BootDisk Suite v9.1.0.1. It was a lifesaver. It allowed me to boot up my PC from a USB flash drive and reset my password in minutes. It was very simple and effective. I would definitely recommend it to anyone who needs Windows password reset." - Jane Doe

            -
            - -
            -

            "I wanted to sell my old laptop but I was worried about my personal data being stolen by someone else. I decided to use Active@ BootDisk Suite v9.1.0.1 to erase and wipe my hard drive securely. It was very easy to use and it did a great job of deleting all my data beyond recovery. It gave me peace of mind knowing that my data was safe and secure." - Mike Jones

            -
            - -

            Why Should You Trust Active@ BootDisk Suite v9.1.0.1?

            - -

            Active@ BootDisk Suite v9.1.0.1 is a product of LSoft Technologies, a leading provider of data recovery, security, and utility software solutions since 1998.

            - -

            LSoft Technologies has a team of experienced and qualified developers who are constantly working on improving and updating their products to meet the needs and expectations of their customers.

            - -

            LSoft Technologies also offers excellent customer support and technical assistance for their products, ensuring that their customers get the best possible service and satisfaction.

            - -

            Active@ BootDisk Suite v9.1.0.1 is one of the most popular and trusted products of LSoft Technologies, with thousands of satisfied customers worldwide.

            -

            How to Uninstall Active@ BootDisk Suite v9.1.0.1?

            - -

            If you want to uninstall Active@ BootDisk Suite v9.1.0.1 from your PC, you can follow these steps:

            - -
              -
1. Go to Control Panel and click on Programs and Features.
2. Find Active@ BootDisk Suite v9.1.0.1 in the list of installed programs and click on Uninstall.
3. Follow the instructions on the screen to complete the uninstallation process.
4. Restart your PC if prompted.
            - -

            That's it! You have successfully uninstalled Active@ BootDisk Suite v9.1.0.1 from your PC.

            - -

            How to Buy Active@ BootDisk Suite v9.1.0.1?

            - -

If you are interested in buying Active@ BootDisk Suite v9.1.0.1, you can do so from the official website of LSoft Technologies here.

            - -

            You can choose between two license options: personal license or business license.

            - -

            The personal license costs $99.99 and allows you to use Active@ BootDisk Suite v9.1.0.1 on one PC for personal use only.

            - -

            The business license costs $149 and allows you to use Active@ BootDisk Suite v9.1.0.1 on unlimited PCs for commercial use.

            - -

            You can pay with various methods, such as credit card, PayPal, wire transfer, or check.

            - -

            You will receive an email with a download link and a registration key after completing your payment.

            - -

            You can also get a 30-day money-back guarantee if you are not satisfied with Active@ BootDisk Suite v9.1.0.1 for any reason.

            -

            How to Create a Bootable Media with Active@ BootDisk Suite v9.1.0.1?

            - -

            If you want to create a bootable media with Active@ BootDisk Suite v9.1.0.1, you can follow these steps:

            - -
              -
1. Run the setup file of Active@ BootDisk Suite v9.1.0.1 and follow the instructions on the screen.
2. Select the option to create a bootable media and click Next.
3. Choose the type of media you want to use: CD/DVD or USB flash drive.
4. Insert a blank CD/DVD or USB flash drive into your PC and select it from the list of available devices.
5. Click Next and wait for the process to complete.
            - -

            That's it! You have successfully created a bootable media with Active@ BootDisk Suite v9.1.0.1.

            - -

            How to Troubleshoot Active@ BootDisk Suite v9.1.0.1?

            - -

            If you encounter any problems or errors when using Active@ BootDisk Suite v9.1.0.1, you can try these troubleshooting tips:

            - -
              -
• Make sure your PC supports booting from CD/DVD or USB flash drive and adjust the BIOS settings accordingly.
• Make sure your bootable media is not corrupted or damaged and try to create a new one if necessary.
• Make sure your PC meets the minimum system requirements for Active@ BootDisk Suite v9.1.0.1 and update your drivers if needed.
• Make sure your PC is not infected by viruses or malware and scan it with reliable antivirus software.
• Contact the LSoft Technologies support team via email or phone if none of the above tips work.
            - -

            LSoft Technologies provides excellent customer support and technical assistance for their products, ensuring that their customers get the best possible service and satisfaction.

            -

            Conclusion

            - -

            Active@ BootDisk Suite v9.1.0.1 is a powerful and versatile tool that can help you solve many PC problems and perform various tasks related to data recovery, system repair, data erasure, partition management, and more.

            - -

            If you are looking for a reliable and easy-to-use solution that can help you access your data and repair your PC from any external media, you should give Active@ BootDisk Suite v9.1.0.1 a try.

            - -

            You can download a freeware evaluation version from here or buy a full version from here.

            - -

            Active@ BootDisk Suite v9.1.0.1 is compatible with Windows 11/10/8/7/Vista/XP/Server 2016/2012/2008/2003 operating systems.

            - -

            LSoft Technologies provides excellent customer support and technical assistance for their products, ensuring that their customers get the best possible service and satisfaction.

            3cee63e6c2
            -
            -
            \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Air Combat Fighter PC Game Free Download !!INSTALL!!.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Air Combat Fighter PC Game Free Download !!INSTALL!!.md deleted file mode 100644 index 6cd23ce3a141de9d60b33fb99fa33a41eba47b1d..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Air Combat Fighter PC Game Free Download !!INSTALL!!.md +++ /dev/null @@ -1,12 +0,0 @@ -

            Air Combat Fighter PC Game Free Download


            Download Ziphttps://urlgoal.com/2uCJDN



- -Take your plane to the city and aim for the skyscraper. - -Last but not least, when you’re done with the game, you can always return your toy to its box and you will be rewarded with a box for a nice toy. 4fefd39f24
            -
            -
            -

            diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Csifatalconspiracyserialnumber.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Csifatalconspiracyserialnumber.md deleted file mode 100644 index 72cff9d03a3f875c085a77639a093a3820083528..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Csifatalconspiracyserialnumber.md +++ /dev/null @@ -1,6 +0,0 @@ -

            csifatalconspiracyserialnumber


            Download Zip ✑ ✑ ✑ https://urlgoal.com/2uCMLu



            -
            - 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/En El Vientre Materno [DVDRip]En El Vientre Materno [DVDRip].md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/En El Vientre Materno [DVDRip]En El Vientre Materno [DVDRip].md deleted file mode 100644 index b17fdce3109eed6eadd56d5dd51f9babd14f81d1..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/En El Vientre Materno [DVDRip]En El Vientre Materno [DVDRip].md +++ /dev/null @@ -1,10 +0,0 @@ - -

In this article, I want to share with you my opinion of a documentary that fascinated me. It is En el vientre materno (Into the Womb), a National Geographic production that shows us the miraculous gestation process of different living beings, from humans to animals. Although I have never been a big fan of documentaries, especially those focused on nature and wildlife, this one captivated me with the quality of its images, the information it provides, and the emotion it conveys. It is not the typical documentary that bores you with data and repetitive scenes; instead, it makes you feel part of an incredible journey through the interior of the mother's womb. I won't pretend that I watch documentaries often, because that would be a lie. Nor am I one of those people who falls asleep with La2 on. But this documentary has changed my perception of the genre and sparked my curiosity to learn more about the marvelous world of life.

            -

            En el Vientre Materno [DVDRip]En el Vientre Materno [DVDRip]


            Download Filehttps://urlgoal.com/2uCKDd



The documentary consists of several episodes, each devoted to a different kind of pregnancy. The first is the most general, showing the development of the human fetus from conception to birth and covering the various stages and changes that both the baby and the mother go through. It is impressive to see how the heart, brain, organs, senses, and limbs of the little being growing inside the womb take shape. It also explains the risks and complications that can arise during pregnancy, as well as the measures that can be taken to prevent or resolve them.

            - -

The following episodes focus on animal pregnancies, including elephants, dolphins, dogs, lions, cats, sharks, and wasps. Each has its own particularities and adaptations to its environment. For example, the elephant has the longest gestation of any mammal, 22 months, and gives birth to a calf weighing 120 kilos. The dolphin has to surface to breathe every few minutes, even when it is in its mother's womb. The dog and the wolf share a common ancestor but have evolved differently according to their relationship with humans. The shark has a very violent mode of reproduction, in which the embryos devour one another inside the mother's womb. The wasp uses other insects as hosts in which to deposit its eggs and feed its larvae.

            - -

The final episode deals with multiple pregnancies and the survival stories that come with them. It shows how identical twins form from a single fertilized egg that splits in two, and explains how fraternal twins develop from two eggs fertilized by two different sperm. Real cases of multiple pregnancies that ran into difficulties or complications are presented, such as twin-to-twin transfusion syndrome and extreme prematurity. The episode highlights the importance of medicine and technology in helping these babies survive and grow up healthy.

            -

            d5da3c52bf
            -
            -
            \ No newline at end of file diff --git a/spaces/renumics/cifar10-outlier/Dockerfile b/spaces/renumics/cifar10-outlier/Dockerfile deleted file mode 100644 index ffacf85bc7801cb4255cb8d3b0c868c625c34a57..0000000000000000000000000000000000000000 --- a/spaces/renumics/cifar10-outlier/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM python:3.9 - -WORKDIR /code -ENV HOME=/code - -RUN apt install curl -RUN pip install pip -U - -RUN pip install pip install "pydantic==1.10.8" renumics-spotlight==1.2.0rc2 - -RUN pip install datasets -COPY prepare.py . -RUN python prepare.py - -COPY . . -RUN mkdir -p /code/.cache -RUN chmod -R 777 /code -CMD ["python", "run.py"] diff --git a/spaces/renumics/cifar100-sliceguard-demo/README.md b/spaces/renumics/cifar100-sliceguard-demo/README.md deleted file mode 100644 index 962b6006d76b16eabde49a683460d4efc546233d..0000000000000000000000000000000000000000 --- a/spaces/renumics/cifar100-sliceguard-demo/README.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Explore data slices in the CIFAR-100 benchmark -emoji: 📊 -colorFrom: gray -colorTo: blue -sdk: docker -pinned: false -license: mit -app_file: run.py -datasets: -- renumics/cifar100-enriched -- cifar100 -tags: -- renumics -- spotlight -- sliceline -- data-centric-ai -duplicated_from: renumics/cifar100-sliceline-demo ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/rgres/Seg2Sat/frontend/.svelte-kit/output/client/_app/immutable/chunks/index-bcf2726a.js b/spaces/rgres/Seg2Sat/frontend/.svelte-kit/output/client/_app/immutable/chunks/index-bcf2726a.js deleted file mode 100644 index 2d47b275bdcb23c7324444798fdc9687822aeb28..0000000000000000000000000000000000000000 --- a/spaces/rgres/Seg2Sat/frontend/.svelte-kit/output/client/_app/immutable/chunks/index-bcf2726a.js +++ /dev/null @@ -1 +0,0 @@ -function N(){}function H(t,n){for(const e in n)t[e]=n[e];return t}function B(t){return t()}function M(){return Object.create(null)}function p(t){t.forEach(B)}function I(t){return typeof t=="function"}function lt(t,n){return t!=t?n==n:t!==n||t&&typeof t=="object"||typeof t=="function"}let g;function ot(t,n){return g||(g=document.createElement("a")),g.href=n,t===g.href}function W(t){return Object.keys(t).length===0}function G(t,...n){if(t==null)return N;const e=t.subscribe(...n);return e.unsubscribe?()=>e.unsubscribe():e}function st(t,n,e){t.$$.on_destroy.push(G(n,e))}function at(t,n,e,i){if(t){const c=L(t,n,e,i);return t[0](c)}}function L(t,n,e,i){return t[1]&&i?H(e.ctx.slice(),t[1](i(n))):e.ctx}function ft(t,n,e,i){if(t[2]&&i){const c=t[2](i(e));if(n.dirty===void 0)return c;if(typeof c=="object"){const s=[],u=Math.max(n.dirty.length,c.length);for(let l=0;l32){const n=[],e=t.ctx.length/32;for(let i=0;i>1);e(c)<=i?t=c+1:n=c}return t}function R(t){if(t.hydrate_init)return;t.hydrate_init=!0;let n=t.childNodes;if(t.nodeName==="HEAD"){const r=[];for(let o=0;o0&&n[e[c]].claim_order<=o?c+1:Q(1,c,y=>n[e[y]].claim_order,o))-1;i[r]=e[f]+1;const a=f+1;e[a]=r,c=Math.max(a,c)}const s=[],u=[];let l=n.length-1;for(let r=e[c]+1;r!=0;r=i[r-1]){for(s.push(n[r-1]);l>=r;l--)u.push(n[l]);l--}for(;l>=0;l--)u.push(n[l]);s.reverse(),u.sort((r,o)=>r.claim_order-o.claim_order);for(let r=0,o=0;r=s[o].claim_order;)o++;const f=ot.removeEventListener(n,e,i)}function xt(t){return function(n){return n.preventDefault(),t.call(this,n)}}function $t(t,n,e){e==null?t.removeAttribute(n):t.getAttribute(n)!==e&&t.setAttribute(n,e)}function wt(t){return 
t===""?null:+t}function Z(t){return Array.from(t.childNodes)}function tt(t){t.claim_info===void 0&&(t.claim_info={last_index:0,total_claimed:0})}function O(t,n,e,i,c=!1){tt(t);const s=(()=>{for(let u=t.claim_info.last_index;u=0;u--){const l=t[u];if(n(l)){const r=e(l);return r===void 0?t.splice(u,1):t[u]=r,c?r===void 0&&t.claim_info.last_index--:t.claim_info.last_index=u,l}}return i()})();return s.claim_order=t.claim_info.total_claimed,t.claim_info.total_claimed+=1,s}function P(t,n,e,i){return O(t,c=>c.nodeName===n,c=>{const s=[];for(let u=0;uc.removeAttribute(u))},()=>i(n))}function vt(t,n,e){return P(t,n,e,X)}function Et(t,n,e){return P(t,n,e,Y)}function nt(t,n){return O(t,e=>e.nodeType===3,e=>{const i=""+n;if(e.data.startsWith(i)){if(e.data.length!==i.length)return e.splitText(i.length)}else e.data=i},()=>j(n),!0)}function kt(t){return nt(t," ")}function Nt(t,n){n=""+n,t.wholeText!==n&&(t.data=n)}function jt(t,n){t.value=n==null?"":n}function St(t,n,e,i){e===null?t.style.removeProperty(n):t.style.setProperty(n,e,i?"important":"")}let m;function h(t){m=t}function S(){if(!m)throw new Error("Function called outside component initialization");return m}function At(t){S().$$.on_mount.push(t)}function Ct(t){S().$$.after_update.push(t)}function Mt(t,n){return S().$$.context.set(t,n),n}const d=[],T=[],x=[],q=[],D=Promise.resolve();let E=!1;function z(){E||(E=!0,D.then(F))}function Tt(){return z(),D}function k(t){x.push(t)}const v=new Set;let b=0;function F(){const t=m;do{for(;b{$.delete(t),i&&(e&&t.d(1),i())}),t.o(n)}}function Ot(t,n){const e={},i={},c={$$scope:1};let s=t.length;for(;s--;){const u=t[s],l=n[s];if(l){for(const r in u)r in l||(i[r]=1);for(const r in l)c[r]||(e[r]=l[r],c[r]=1);t[s]=l}else for(const r in u)c[r]=1}for(const u in i)u in e||(e[u]=void 0);return e}function Pt(t){return typeof t=="object"&&t!==null?t:{}}function Dt(t){t&&t.c()}function zt(t,n){t&&t.l(n)}function rt(t,n,e,i){const{fragment:c,on_mount:s,on_destroy:u,after_update:l}=t.$$;c&&c.m(n,e),i||k(()=>{const r=s.map(B).filter(I);u?u.push(...r):p(r),t.$$.on_mount=[]}),l.forEach(k)}function ct(t,n){const e=t.$$;e.fragment!==null&&(p(e.on_destroy),e.fragment&&e.fragment.d(n),e.on_destroy=e.fragment=null,e.ctx=[])}function ut(t,n){t.$$.dirty[0]===-1&&(d.push(t),z(),t.$$.dirty.fill(0)),t.$$.dirty[n/31|0]|=1<{const C=A.length?A[0]:y;return o.ctx&&c(o.ctx[a],o.ctx[a]=C)&&(!o.skip_bound&&o.bound[a]&&o.bound[a](C),f&&ut(t,a)),y}):[],o.update(),f=!0,p(o.before_update),o.fragment=i?i(o.ctx):!1,n.target){if(n.hydrate){J();const a=Z(n.target);o.fragment&&o.fragment.l(a),a.forEach(V)}else o.fragment&&o.fragment.c();n.intro&&it(t.$$.fragment),rt(t,n.target,n.anchor,n.customElement),K(),F()}h(r)}class Ht{$destroy(){ct(this,1),this.$destroy=N}$on(n,e){const i=this.$$.callbacks[n]||(this.$$.callbacks[n]=[]);return i.push(e),()=>{const c=i.indexOf(e);c!==-1&&i.splice(c,1)}}$set(n){this.$$set&&!W(n)&&(this.$$.skip_bound=!0,this.$$set(n),this.$$.skip_bound=!1)}}export{Pt as A,ct as B,H as C,Tt as D,N as E,at as F,_t as G,dt as H,ft as I,U as J,ot as K,bt as L,pt as M,st as N,ht as O,Y as P,Et as Q,jt as R,Ht as S,xt as T,p as U,wt as V,T as W,Z as a,$t as b,vt as c,V as d,X as e,St as f,mt as g,nt as h,Ft as i,Nt as j,yt as k,gt as l,kt as m,qt as n,Lt as o,Bt as p,it as q,Mt as r,lt as s,j as t,Ct as u,At as v,Dt as w,zt as x,rt as y,Ot as z}; diff --git a/spaces/riyueyiming/gpt/run_Linux.sh b/spaces/riyueyiming/gpt/run_Linux.sh deleted file mode 100644 index 
62af07283093d8e580763d7acfe493c3d88e7b08..0000000000000000000000000000000000000000 --- a/spaces/riyueyiming/gpt/run_Linux.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# Get the directory the script lives in -script_dir=$(dirname "$0") - -# Change the working directory to the script's directory -cd "$script_dir" - -# Check whether the Git repository has updates -git remote update -pwd - -if ! git status -uno | grep 'up to date' > /dev/null; then - # If there are updates, stop the currently running server - pkill -f ChuanhuChatbot.py - - # Pull the latest changes - git pull - - # Install dependencies - pip3 install -r requirements.txt - - # Restart the server - nohup python3 ChuanhuChatbot.py & -fi diff --git "a/spaces/rizam/literature-research-tool/documents/docs/2-\346\200\273\347\273\223\345\212\237\350\203\275.md" "b/spaces/rizam/literature-research-tool/documents/docs/2-\346\200\273\347\273\223\345\212\237\350\203\275.md" deleted file mode 100644 index 49d4ff26f17f5265068e18aad83054bb9d6e1ced..0000000000000000000000000000000000000000 --- "a/spaces/rizam/literature-research-tool/documents/docs/2-\346\200\273\347\273\223\345\212\237\350\203\275.md" +++ /dev/null @@ -1,19 +0,0 @@ -# 2 Research Trends Summarization - -## Model Architecture -![](https://i.imgur.com/Lv8um1V.png) - -### 1 Baseline Configuration -1. pre-trained language model: `sentence-transformers/all-MiniLM-L6-v2` -2. dimension reduction: `None` -3. clustering algorithms: `kmeans` -4. keywords extraction model: `keyphrase-transformer` - -[[example run](https://github.com/Mondkuchen/idp_LiteratureResearch_Tool/blob/main/example_run.py)] [[results](https://github.com/Mondkuchen/idp_LiteratureResearch_Tool/blob/main/examples/IDP.ipynb)] - - -### TODO: -1. clustering: using other clustering algorithms such as Gaussian Mixture Model (GMM) -2. keywords extraction model: train another model -3. add dimension reduction -4. better PLM: sentence-transformers/sentence-t5-xxl diff --git a/spaces/rorallitri/biomedical-language-models/logs/Dawn Of The Dead 2004 Bluray 1080p X264 17 Experience The Apocalypse Like Never Before.md b/spaces/rorallitri/biomedical-language-models/logs/Dawn Of The Dead 2004 Bluray 1080p X264 17 Experience The Apocalypse Like Never Before.md deleted file mode 100644 index d5854efc4b07dacc1832452c764034850801ede9..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Dawn Of The Dead 2004 Bluray 1080p X264 17 Experience The Apocalypse Like Never Before.md +++ /dev/null @@ -1,5 +0,0 @@ -
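The baseline configuration listed in the summarization doc above (all-MiniLM-L6-v2 embeddings plus k-means) can be sketched in a few lines. This is a minimal illustration, not the repo's actual code; the abstracts are placeholders, and the `keyphrase-transformer` extraction step is omitted.

```python
# Minimal sketch of the baseline pipeline: embed texts, cluster, inspect.
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans

abstracts = [
    "Transformers for low-resource machine translation.",
    "Prompt tuning of large language models.",
    "Graph neural networks for molecule property prediction.",
    "Message passing networks for chemistry.",
]

# Step 1: embed each abstract into a 384-dim vector.
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
embeddings = model.encode(abstracts)

# Step 2: cluster the embeddings; each cluster is one research trend.
labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(embeddings)
for doc, label in zip(abstracts, labels):
    print(label, doc)
```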

George Romero's 1978 classic Dawn of the Dead is probably the greatest zombie movie of all time, as well as one of the best horror movies of the last 30 years. Set mainly in a suburban mall, it was as much a critique/satire of rampant consumerism as it was a chilling gore-fest. Though our country's capitalistic nature has only escalated in the last 25 years, the satirical content has been slashed in Zack Snyder's 2004 remake -- but the zombies are more ferocious than ever. Unlike your parents' undead, lumbering around aimlessly in search of brains, these zombies are fast, mean, and -- relatively speaking -- smart. Otherwise, this hi-tech update sticks pretty close to the source material. A visceral, violent pre-credit sequence brilliantly sets the stage: A strange plague infects much of the population overnight, causing the dead to rise and attack the living. Those not infected manage to hole up in a mall while the bloodthirsty corpses converge outside like shoppers on the day after Thanksgiving. Among the group of survivors are Sarah Polley, Ving Rhames, Mekhi Phifer, and Jake Weber. Who are they and what do they do? They fight zombies, of course -- and therein lies the problem with this remake. The script is nearly as mindless as the zombies, heading down the predictable path of offing the leads one by one as they fight to escape. Snyder is a skilled visual stylist, however, and makes up for it with sheer thrills. Adrenaline levels run high for the entire film, and the jolts come fast and furious, with plenty of gruesome delights for gore fans. With a little more care and attention to story, Dawn of the Dead could have truly rivaled the original; instead we merely get a really good scare.

            -

            Dawn Of The Dead 2004 Bluray 1080p X264 17


            DOWNLOAD === https://tinurll.com/2uzmZ8



            aaccfb2cb3
            -
            -
            \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Facebook Download For Mac Os X How to Download Install and Update the App on Your Mac.md b/spaces/rorallitri/biomedical-language-models/logs/Facebook Download For Mac Os X How to Download Install and Update the App on Your Mac.md deleted file mode 100644 index 801ba1dde234d97694eb3fabecc4778951de384e..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Facebook Download For Mac Os X How to Download Install and Update the App on Your Mac.md +++ /dev/null @@ -1,35 +0,0 @@ - -

Freeware programs can be downloaded and used free of charge and without any time limitations. Freeware products can be used free of charge for both personal and professional (commercial) use.

            -

This license is commonly used for video games, and it allows users to download and play the game for free. Basically, a product is offered Free to Play (Freemium) and the user can decide whether to pay money (Premium) for additional features, services, or virtual or physical goods that expand the functionality of the game. In some cases, ads may be shown to the users.

            -

            Facebook Download For Mac Os X


            DOWNLOAD 🆗 https://tinurll.com/2uzovo



            -

This software is no longer available for download. This could be due to the program being discontinued, having a security issue, or for other reasons.

            -

As for the download, Facebook Desktop is a fairly lightweight program that requires less space than the average program in the Internet software category. It is frequently downloaded in the United Kingdom, the Netherlands, and Portugal.

            -

            Potentially, if it installs correctly, it functions the same as the Koobface worm running on Windows. It runs a local web server and an IRC server, acts as part of a botnet, acts as a DNS changer, and can activate a number of other functions, either through files initially installed or other files downloaded subsequently. It spreads by posting messages on Facebook, MySpace and Twitter, usually trying to get people to click a link to view some sort of video.

            -

A loved one recorded an audio message for me on Facebook Messenger; it looks like the image below. I'm trying to download it, but nearly every method I've tried to capture it can't even see that the file exists, and the one exception downloaded a file that nothing can open.

            -

Access this URL via Chrome and log in -> Select the message that the audio file is in -> Go to the file section -> In Chrome there should be a download icon on the right side (at the end of the audio file line) which should allow you to download it.
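Once you have copied the audio file's direct URL via the steps above (for example, from the download link or Chrome's DevTools Network tab), saving it is a one-request job. Below is a small hypothetical Python sketch; the URL and filename are placeholders, not real Facebook endpoints, and `requests` is assumed to be installed.

```python
# Save an audio message once its direct URL has been copied from Chrome.
import requests

audio_url = "https://cdn.example.com/voice_message.mp4"  # placeholder URL
response = requests.get(audio_url, timeout=30)
response.raise_for_status()  # fail loudly if the link is dead

with open("voice_message.mp4", "wb") as f:
    f.write(response.content)
```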

            -

You may want to watch FaceBook videos offline on your Mac to avoid any internet issues. You can save FaceBook videos locally on your MacBook Pro, Air, or iMac if you have a video downloader for FaceBook installed.

            -

What makes things a bit awkward is that few FaceBook video downloaders for Mac are available yet. Given that, we have handpicked a list of the best video downloaders that work well with your Mac, based on reviews and testimonials, with which you can download FaceBook videos in HD or 4K on macOS for free.

            -

            -

MacX Video Converter Pro is the fastest program on the list for downloading FB videos on macOS for free - it takes only a few seconds to download a 5-minute FaceBook video. Downloads of 8K, 4K UHD, 1080P, 720P SD, VR 360, 3D Blu-ray, MP4, FLV, AVI, etc. videos are all supported, with the highest quality kept.

            -

The video downloader for Mac also serves as a FaceBook video converter that can convert downloaded FaceBook videos on a MacBook Pro/Air/iMac to MP3, MP4, AVI, 3GP, MOV, or WMV at the same time. The FB downloader has a clutter-free interface, making it simple to download HD and 4K FaceBook videos on macOS for free.

            -

Many free FaceBook downloading apps for Mac nag you until you cough up the money to buy them, or come with ads, viruses, or spam. But MacX YouTube Downloader is clean and safe freeware for downloading FaceBook videos. To download a FaceBook video on Mac, just copy and paste the video URL.

            -

It delivers the same performance as the fastest FaceBook downloader for Mac, only with more limited options. With this free FaceBook video downloader for Mac in hand, you can even save FaceBook VR 360 and 4K/8K UHD videos freely. But there is no FB video conversion function.

            -

If you don't want to bother downloading software on your Mac, here's another way to go: use an online FaceBook video downloader for the new macOS. The online FaceBook video saver allows you to select the FB video quality (360p, 480p, 720p SD, 1080p HD, 4K), while 8K and VR 360 are not said to be supported on its official site. To download a FaceBook video on Mac, an add-on/extension must be installed on the new system to catch the FB video successfully. Besides, it also serves as a good YouTube downloader.

            -

Note: the free online FB video downloader for Mac shows many puzzling download buttons on its main interface, most of which are fake FB video download links. Check the file extension before you download and save FB video and movie clips on your Mac.

            -

It works OK for free FaceBook video downloads on Mac, but not great. In fact, Miro is primarily a music and video player. The free FB downloader lets you save videos from FaceBook via torrents/BitTorrent (which may be illegal in your country). So it's not safe when it comes to downloading FaceBook videos via torrents.

            -

The best part of this FaceBook downloader for Mac is that it's open source and updated from time to time. In addition, it can play downloaded FaceBook videos, music, and movie clips. But it's not that appealing, as it's risky. It's the last way to go.

            -

So which is the best program for downloading FB videos on Mac for free? MacX YouTube Downloader is free and fast but limited in function. Catchvideo and Miro can do the free FaceBook video download job on macOS, but they are likely to bring viruses or malware. MacX Video Converter Pro is rich in features (download, convert, edit), fast at downloading, and the most reliable FaceBook 4K/HD video downloader for Mac here. You can follow the steps below to download and convert a Facebook video to MP4 or MP3 on Mac.

            -

Step 1: Download the free FB downloader on your Mac and launch it. Click the "YouTube" icon on the main interface (yes, YouTube - it will direct you to the FaceBook video download page). Click the "Add Link" button, copy and paste the FaceBook video URL into the box, then click "analyze" to load the FB video info.

            -

After the FB video download on Mac is finished, and if the "Auto add to convert list" option was selected earlier (step 1 can then be skipped), the FaceBook downloader for Mac can convert the downloaded FaceBook video to MP4, MP3, AVI, MOV, MKV, WMV, FLV, etc. - any format - or directly to a YouTube video format, FB video format, iPhone, iPad, iPod, Samsung Galaxy, and more.
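The conversion step above happens inside the GUI app. As a command-line alternative, the same FB-video-to-MP4/MP3 conversion can be done with ffmpeg, driven here from Python; this is a sketch assuming ffmpeg is installed and on your PATH, and the file names are placeholders.

```python
# Convert a downloaded FaceBook video to MP4 and extract its audio as MP3.
import subprocess

src = "facebook_clip.flv"  # the downloaded FaceBook video (placeholder name)

# Convert the clip to MP4.
subprocess.run(["ffmpeg", "-i", src, "facebook_clip.mp4"], check=True)

# Extract the audio track only (-vn drops the video stream).
subprocess.run(["ffmpeg", "-i", src, "-vn", "facebook_clip.mp3"], check=True)
```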

            -

            The Facebook HD video downloader for Mac delivers easy ways to edit FaceBook videos, for example, compressing video file size, adding SRT subtitles/watermarks, adjusting video parameters, frame rate, bit rate, aspect ratio, and more.

            -

            Requirements: BlueJeans Relay download package (software and documentation), provisioned account, on-premise server, calendar service (Exchange 2010/2013/2016/2019, Office 365, or Google Calendar), Android tablet (4.2 and later) for each configured conference room, Relay Touch app. Please refer to the Getting Started guide for setup instructions.

            -


            -

            Skype and Facebook are both Internet-based services that you can access from your MacBook. Facebook does offer a separate application, and you access Facebook directly from your MacBook's web browser. Skype, on the other hand, requires the use of a free application that you can download and install on your MacBook directly from Skype's website.

            -

            Subscribe to the MacRumors YouTube channel for more videos.
            Some iOS developers have been updating their apps to make them compatible with macOS, but there are ways to download apps that haven't been optimized and even apps that developers have prevented from being available on macOS. Watch our YouTube video for instructions, or read through the steps below.

            -

Any chats, including their settings and any files, GIFs, and images, are downloaded and synced automatically. So if you have a particular group chat muted because it's just too busy and distracting, that group chat will be muted in the desktop app without you needing to mute it again.

            -

When you buy the program, you are immediately emailed a download link and product license code. Open that email on your Mac or PC and install Decipher Messenger Export onto your desktop or laptop computer. You can also download Decipher Messenger Export to your computer from our website at any time.

            -

            You can install FBReader from the Mac App Store or download it directly from this site. The packages are slightly different: the App Store version excludes direct ebook purchases according to the Apple requirements.

            -


            -

            Get the latest Nmap for your system:

            • Windows
            • macOS
            • Linux (RPM)
            • Any other OS (source code)
Older versions (and sometimes newer test releases) are available from the Nmap release archive (and really old ones are in dist-old). For the more security-paranoid (smart) users, GPG detached signatures and SHA-1 hashes for each release are available in the sigs directory (verification instructions). Before downloading, be sure to read the relevant sections for your platform from the Nmap Install Guide. The most important changes (features, bugfixes, etc.) in each Nmap version are described in the Changelog. Using Nmap is covered in the Reference Guide, and don't forget to read the other available documentation, particularly the official book Nmap Network Scanning! Nmap users are encouraged to subscribe to the Nmap-hackers mailing list. It is a low-volume (7 posts in 2015), moderated list for the most important announcements about Nmap, Insecure.org, and related projects. You can join the 128,953 current subscribers (as of September 2017) by submitting your email address here:
            (or subscribe with custom options from the Nmap-hackers list info page)
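The signature check mentioned above can be scripted. Below is a hedged Python sketch of verifying a release tarball against its GPG detached signature from the sigs directory; it assumes gpg is installed and Nmap's signing key has already been imported, and the file names are examples.

```python
# Verify an Nmap release tarball against its detached GPG signature.
import subprocess

tarball = "nmap-7.94.tar.bz2"          # example release file
signature = "nmap-7.94.tar.bz2.asc"    # matching detached signature

result = subprocess.run(
    ["gpg", "--verify", signature, tarball],
    capture_output=True, text=True,
)
print(result.stderr)  # gpg reports verification status on stderr
```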

            aaccfb2cb3
            -
            -
            \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/HD Online Player (Kaante Movie Download In Hindi 720p ).md b/spaces/rorallitri/biomedical-language-models/logs/HD Online Player (Kaante Movie Download In Hindi 720p ).md deleted file mode 100644 index 758d51f44128f3058b73541eca40953ce34755e7..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/HD Online Player (Kaante Movie Download In Hindi 720p ).md +++ /dev/null @@ -1,6 +0,0 @@ -

            HD Online Player (Kaante movie download in hindi 720p )


            Download ››››› https://tinurll.com/2uzmX1



            -
            -Download Seberg 2020 Full Movies (2019) Hindi Movie 720p Download Full Khatrimaza ... 300Mb Dual Audio Hindi Dubbed HD Movies Tags: Download watch Full Movie in hd mkv ... Bigg Boss (2020) HDTV Telugu Season 4 Day - 41 Movie Watch Online Free. ... Watch Maari 2 (2019) Hindi Dubbed From Player 1 Below. 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/rosenthal/chess/chessfenbot/Dockerfile b/spaces/rosenthal/chess/chessfenbot/Dockerfile deleted file mode 100644 index b80d86e9086c87d4020688cedf344c472e880254..0000000000000000000000000000000000000000 --- a/spaces/rosenthal/chess/chessfenbot/Dockerfile +++ /dev/null @@ -1,31 +0,0 @@ -FROM tensorflow/tensorflow -MAINTAINER Sam - -# Install python and pip and use pip to install the python reddit api PRAW -RUN apt-get -y update && apt-get install -y \ - python-dev \ - libxml2-dev \ - libxslt1-dev \ - libjpeg-dev \ - vim \ - && apt-get clean - -# Install python reddit api related files -RUN pip install praw==4.3.0 beautifulsoup4==4.4.1 lxml==3.3.3 Pillow==4.0.0 html5lib==1.0b8 - -# Clean up APT when done. -RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - -# Remove jupyter related files -RUN rm -rf /notebooks /run_jupyter.sh - -# Copy code over -COPY . /tcb/ - -WORKDIR /tcb - -# Run chessbot by default -CMD ["/tcb/run_chessbot.sh"] - -# Start up the docker instance with the proper auth file using -# $ docker run -dt --rm --name cfb -v :/tcb/auth_config.py elucidation/tensorflow_chessbot diff --git a/spaces/rstallman/Mayfair-Partner-Music/audiocraft/data/audio.py b/spaces/rstallman/Mayfair-Partner-Music/audiocraft/data/audio.py deleted file mode 100644 index 2048df6f175d7303bcf5c7b931922fd297908ead..0000000000000000000000000000000000000000 --- a/spaces/rstallman/Mayfair-Partner-Music/audiocraft/data/audio.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Audio IO methods are defined in this module (info, read, write), -We rely on av library for faster read when possible, otherwise on torchaudio. -""" - -from dataclasses import dataclass -from pathlib import Path -import logging -import typing as tp - -import numpy as np -import soundfile -import torch -from torch.nn import functional as F -import torchaudio as ta - -import av - -from .audio_utils import f32_pcm, i16_pcm, normalize_audio - - -_av_initialized = False - - -def _init_av(): - global _av_initialized - if _av_initialized: - return - logger = logging.getLogger('libav.mp3') - logger.setLevel(logging.ERROR) - _av_initialized = True - - -@dataclass(frozen=True) -class AudioFileInfo: - sample_rate: int - duration: float - channels: int - - -def _av_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - _init_av() - with av.open(str(filepath)) as af: - stream = af.streams.audio[0] - sample_rate = stream.codec_context.sample_rate - duration = float(stream.duration * stream.time_base) - channels = stream.channels - return AudioFileInfo(sample_rate, duration, channels) - - -def _soundfile_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - info = soundfile.info(filepath) - return AudioFileInfo(info.samplerate, info.duration, info.channels) - - -def audio_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - # torchaudio no longer returns useful duration informations for some formats like mp3s. - filepath = Path(filepath) - if filepath.suffix in ['.flac', '.ogg']: # TODO: Validate .ogg can be safely read with av_info - # ffmpeg has some weird issue with flac. - return _soundfile_info(filepath) - else: - return _av_info(filepath) - - -def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) 
-> tp.Tuple[torch.Tensor, int]: - """FFMPEG-based audio file reading using PyAV bindings. - Soundfile cannot read mp3 and av_read is more efficient than torchaudio. - - Args: - filepath (str or Path): Path to audio file to read. - seek_time (float): Time at which to start reading in the file. - duration (float): Duration to read from the file. If set to -1, the whole file is read. - Returns: - Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate - """ - _init_av() - with av.open(str(filepath)) as af: - stream = af.streams.audio[0] - sr = stream.codec_context.sample_rate - num_frames = int(sr * duration) if duration >= 0 else -1 - frame_offset = int(sr * seek_time) - # we need a small negative offset otherwise we get some edge artifact - # from the mp3 decoder. - af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream) - frames = [] - length = 0 - for frame in af.decode(streams=stream.index): - current_offset = int(frame.rate * frame.pts * frame.time_base) - strip = max(0, frame_offset - current_offset) - buf = torch.from_numpy(frame.to_ndarray()) - if buf.shape[0] != stream.channels: - buf = buf.view(-1, stream.channels).t() - buf = buf[:, strip:] - frames.append(buf) - length += buf.shape[1] - if num_frames > 0 and length >= num_frames: - break - assert frames - # If the above assert fails, it is likely because we seeked past the end of file point, - # in which case ffmpeg returns a single frame with only zeros, and a weird timestamp. - # This will need proper debugging, in due time. - wav = torch.cat(frames, dim=1) - assert wav.shape[0] == stream.channels - if num_frames > 0: - wav = wav[:, :num_frames] - return f32_pcm(wav), sr - - -def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0., - duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]: - """Read audio by picking the most appropriate backend tool based on the audio format. - - Args: - filepath (str or Path): Path to audio file to read. - seek_time (float): Time at which to start reading in the file. - duration (float): Duration to read from the file. If set to -1, the whole file is read. - pad (bool): Pad output audio if not reaching expected duration. - Returns: - Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate. - """ - fp = Path(filepath) - if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg - # There is some bug with ffmpeg and reading flac - info = _soundfile_info(filepath) - frames = -1 if duration <= 0 else int(duration * info.sample_rate) - frame_offset = int(seek_time * info.sample_rate) - wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32) - assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}" - wav = torch.from_numpy(wav).t().contiguous() - if len(wav.shape) == 1: - wav = torch.unsqueeze(wav, 0) - elif ( - fp.suffix in ['.wav', '.mp3'] and fp.suffix[1:] in ta.utils.sox_utils.list_read_formats() - and duration <= 0 and seek_time == 0 - ): - # Torchaudio is faster if we load an entire file at once. 
- wav, sr = ta.load(fp) - else: - wav, sr = _av_read(filepath, seek_time, duration) - if pad and duration > 0: - expected_frames = int(duration * sr) - wav = F.pad(wav, (0, expected_frames - wav.shape[-1])) - return wav, sr - - -def audio_write(stem_name: tp.Union[str, Path], - wav: torch.Tensor, sample_rate: int, - format: str = 'wav', mp3_rate: int = 320, normalize: bool = True, - strategy: str = 'peak', peak_clip_headroom_db: float = 1, - rms_headroom_db: float = 18, loudness_headroom_db: float = 14, - loudness_compressor: bool = False, - log_clipping: bool = True, make_parent_dir: bool = True, - add_suffix: bool = True) -> Path: - """Convenience function for saving audio to disk. Returns the filename the audio was written to. - - Args: - stem_name (str or Path): Filename without extension which will be added automatically. - format (str): Either "wav" or "mp3". - mp3_rate (int): kbps when using mp3s. - normalize (bool): if `True` (default), normalizes according to the prescribed - strategy (see after). If `False`, the strategy is only used in case clipping - would happen. - strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak', - i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square - with extra headroom to avoid clipping. 'clip' just clips. - peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy. - rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger - than the `peak_clip` one to avoid further clipping. - loudness_headroom_db (float): Target loudness for loudness normalization. - loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'. - when strategy is 'loudness'log_clipping (bool): If True, basic logging on stderr when clipping still - occurs despite strategy (only for 'rms'). - make_parent_dir (bool): Make parent directory if it doesn't exist. - Returns: - Path: Path of the saved audio. - """ - assert wav.dtype.is_floating_point, "wav is not floating point" - if wav.dim() == 1: - wav = wav[None] - elif wav.dim() > 2: - raise ValueError("Input wav should be at most 2 dimension.") - assert wav.isfinite().all() - wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db, - rms_headroom_db, loudness_headroom_db, log_clipping=log_clipping, - sample_rate=sample_rate, stem_name=str(stem_name)) - kwargs: dict = {} - if format == 'mp3': - suffix = '.mp3' - kwargs.update({"compression": mp3_rate}) - elif format == 'wav': - wav = i16_pcm(wav) - suffix = '.wav' - kwargs.update({"encoding": "PCM_S", "bits_per_sample": 16}) - else: - raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.") - if not add_suffix: - suffix = '' - path = Path(str(stem_name) + suffix) - if make_parent_dir: - path.parent.mkdir(exist_ok=True, parents=True) - try: - ta.save(path, wav, sample_rate, **kwargs) - except Exception: - if path.exists(): - # we do not want to leave half written files around. 
- path.unlink() - raise - return path diff --git a/spaces/ruboin/faster-whisper-webui/cli.py b/spaces/ruboin/faster-whisper-webui/cli.py deleted file mode 100644 index 70c08138c9274c3576d28356e53f3d94a9968a2e..0000000000000000000000000000000000000000 --- a/spaces/ruboin/faster-whisper-webui/cli.py +++ /dev/null @@ -1,173 +0,0 @@ -import argparse -import os -import pathlib -from urllib.parse import urlparse -import warnings -import numpy as np - -import torch -from app import VadOptions, WhisperTranscriber -from src.config import ApplicationConfig, VadInitialPromptMode -from src.download import download_url -from src.languages import get_language_names - -from src.utils import optional_float, optional_int, str2bool -from src.whisper.whisperFactory import create_whisper_container - -def cli(): - app_config = ApplicationConfig.create_default() - whisper_models = app_config.get_model_names() - - # For the CLI, we fallback to saving the output to the current directory - output_dir = app_config.output_dir if app_config.output_dir is not None else "." - - # Environment variable overrides - default_whisper_implementation = os.environ.get("WHISPER_IMPLEMENTATION", app_config.whisper_implementation) - - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("audio", nargs="+", type=str, \ - help="audio file(s) to transcribe") - parser.add_argument("--model", default=app_config.default_model_name, choices=whisper_models, \ - help="name of the Whisper model to use") # medium - parser.add_argument("--model_dir", type=str, default=app_config.model_dir, \ - help="the path to save model files; uses ~/.cache/whisper by default") - parser.add_argument("--device", default=app_config.device, \ - help="device to use for PyTorch inference") - parser.add_argument("--output_dir", "-o", type=str, default=output_dir, \ - help="directory to save the outputs") - parser.add_argument("--verbose", type=str2bool, default=app_config.verbose, \ - help="whether to print out the progress and debug messages") - parser.add_argument("--whisper_implementation", type=str, default=default_whisper_implementation, choices=["whisper", "faster-whisper"],\ - help="the Whisper implementation to use") - - parser.add_argument("--task", type=str, default=app_config.task, choices=["transcribe", "translate"], \ - help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')") - parser.add_argument("--language", type=str, default=app_config.language, choices=sorted(get_language_names()), \ - help="language spoken in the audio, specify None to perform language detection") - - parser.add_argument("--vad", type=str, default=app_config.default_vad, choices=["none", "silero-vad", "silero-vad-skip-gaps", "silero-vad-expand-into-gaps", "periodic-vad"], \ - help="The voice activity detection algorithm to use") # silero-vad - parser.add_argument("--vad_initial_prompt_mode", type=str, default=app_config.vad_initial_prompt_mode, choices=["prepend_all_segments", "prepend_first_segment"], \ - help="Whether or not to prepend the initial prompt to each VAD segment (prepend_all_segments), or just the first segment (prepend_first_segment)") # prepend_first_segment - parser.add_argument("--vad_merge_window", type=optional_float, default=app_config.vad_merge_window, \ - help="The window size (in seconds) to merge voice segments") - parser.add_argument("--vad_max_merge_size", type=optional_float, default=app_config.vad_max_merge_size,\ - help="The maximum size (in 
seconds) of a voice segment") - parser.add_argument("--vad_padding", type=optional_float, default=app_config.vad_padding, \ - help="The padding (in seconds) to add to each voice segment") - parser.add_argument("--vad_prompt_window", type=optional_float, default=app_config.vad_prompt_window, \ - help="The window size of the prompt to pass to Whisper") - parser.add_argument("--vad_cpu_cores", type=int, default=app_config.vad_cpu_cores, \ - help="The number of CPU cores to use for VAD pre-processing.") # 1 - parser.add_argument("--vad_parallel_devices", type=str, default=app_config.vad_parallel_devices, \ - help="A commma delimited list of CUDA devices to use for parallel processing. If None, disable parallel processing.") # "" - parser.add_argument("--auto_parallel", type=bool, default=app_config.auto_parallel, \ - help="True to use all available GPUs and CPU cores for processing. Use vad_cpu_cores/vad_parallel_devices to specify the number of CPU cores/GPUs to use.") # False - - parser.add_argument("--temperature", type=float, default=app_config.temperature, \ - help="temperature to use for sampling") - parser.add_argument("--best_of", type=optional_int, default=app_config.best_of, \ - help="number of candidates when sampling with non-zero temperature") - parser.add_argument("--beam_size", type=optional_int, default=app_config.beam_size, \ - help="number of beams in beam search, only applicable when temperature is zero") - parser.add_argument("--patience", type=float, default=app_config.patience, \ - help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search") - parser.add_argument("--length_penalty", type=float, default=app_config.length_penalty, \ - help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple lengt normalization by default") - - parser.add_argument("--suppress_tokens", type=str, default=app_config.suppress_tokens, \ - help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations") - parser.add_argument("--initial_prompt", type=str, default=app_config.initial_prompt, \ - help="optional text to provide as a prompt for the first window.") - parser.add_argument("--condition_on_previous_text", type=str2bool, default=app_config.condition_on_previous_text, \ - help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop") - parser.add_argument("--fp16", type=str2bool, default=app_config.fp16, \ - help="whether to perform inference in fp16; True by default") - parser.add_argument("--compute_type", type=str, default=app_config.compute_type, choices=["default", "auto", "int8", "int8_float16", "int16", "float16", "float32"], \ - help="the compute type to use for inference") - - parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=app_config.temperature_increment_on_fallback, \ - help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below") - parser.add_argument("--compression_ratio_threshold", type=optional_float, default=app_config.compression_ratio_threshold, \ - help="if the gzip compression ratio is higher than this value, treat the decoding as failed") - parser.add_argument("--logprob_threshold", type=optional_float, 
default=app_config.logprob_threshold, \ - help="if the average log probability is lower than this value, treat the decoding as failed") - parser.add_argument("--no_speech_threshold", type=optional_float, default=app_config.no_speech_threshold, \ - help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence") - - args = parser.parse_args().__dict__ - model_name: str = args.pop("model") - model_dir: str = args.pop("model_dir") - output_dir: str = args.pop("output_dir") - device: str = args.pop("device") - os.makedirs(output_dir, exist_ok=True) - - whisper_implementation = args.pop("whisper_implementation") - print(f"Using {whisper_implementation} for Whisper") - - if model_name.endswith(".en") and args["language"] not in {"en", "English"}: - warnings.warn(f"{model_name} is an English-only model but receipted '{args['language']}'; using English instead.") - args["language"] = "en" - - temperature = args.pop("temperature") - temperature_increment_on_fallback = args.pop("temperature_increment_on_fallback") - if temperature_increment_on_fallback is not None: - temperature = tuple(np.arange(temperature, 1.0 + 1e-6, temperature_increment_on_fallback)) - else: - temperature = [temperature] - - vad = args.pop("vad") - vad_initial_prompt_mode = args.pop("vad_initial_prompt_mode") - vad_merge_window = args.pop("vad_merge_window") - vad_max_merge_size = args.pop("vad_max_merge_size") - vad_padding = args.pop("vad_padding") - vad_prompt_window = args.pop("vad_prompt_window") - vad_cpu_cores = args.pop("vad_cpu_cores") - auto_parallel = args.pop("auto_parallel") - - compute_type = args.pop("compute_type") - - transcriber = WhisperTranscriber(delete_uploaded_files=False, vad_cpu_cores=vad_cpu_cores, app_config=app_config) - transcriber.set_parallel_devices(args.pop("vad_parallel_devices")) - transcriber.set_auto_parallel(auto_parallel) - - model = create_whisper_container(whisper_implementation=whisper_implementation, model_name=model_name, - device=device, compute_type=compute_type, download_root=model_dir, models=app_config.models) - - if (transcriber._has_parallel_devices()): - print("Using parallel devices:", transcriber.parallel_device_list) - - for audio_path in args.pop("audio"): - sources = [] - - # Detect URL and download the audio - if (uri_validator(audio_path)): - # Download from YouTube/URL directly - for source_path in download_url(audio_path, maxDuration=-1, destinationDirectory=output_dir, playlistItems=None): - source_name = os.path.basename(source_path) - sources.append({ "path": source_path, "name": source_name }) - else: - sources.append({ "path": audio_path, "name": os.path.basename(audio_path) }) - - for source in sources: - source_path = source["path"] - source_name = source["name"] - - vadOptions = VadOptions(vad, vad_merge_window, vad_max_merge_size, vad_padding, vad_prompt_window, - VadInitialPromptMode.from_string(vad_initial_prompt_mode)) - - result = transcriber.transcribe_file(model, source_path, temperature=temperature, vadOptions=vadOptions, **args) - - transcriber.write_result(result, source_name, output_dir) - - transcriber.close() - -def uri_validator(x): - try: - result = urlparse(x) - return all([result.scheme, result.netloc]) - except: - return False - -if __name__ == '__main__': - cli() \ No newline at end of file diff --git a/spaces/ruslanmv/TextToVideo-Dalle/README.md b/spaces/ruslanmv/TextToVideo-Dalle/README.md deleted file mode 100644 index 
9fdae974a40f0c13d7d5e16427f8ad828b442b94..0000000000000000000000000000000000000000 --- a/spaces/ruslanmv/TextToVideo-Dalle/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: TextToVideo Dalle -emoji: 📉 -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.1.6 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/s3nh/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML/README.md b/spaces/s3nh/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML/README.md deleted file mode 100644 index bb35beb13347ced77957a773d5ce53664f0140ed..0000000000000000000000000000000000000000 --- a/spaces/s3nh/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Totally Not An Llm AlpacaCielo2 7b 8k GGML -emoji: 🦀 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sahshd/ChuanhuChatGPT/modules/config.py b/spaces/sahshd/ChuanhuChatGPT/modules/config.py deleted file mode 100644 index 2eee7730787df6a857de21dbb0cbefc42cb7273d..0000000000000000000000000000000000000000 --- a/spaces/sahshd/ChuanhuChatGPT/modules/config.py +++ /dev/null @@ -1,173 +0,0 @@ -from collections import defaultdict -from contextlib import contextmanager -import os -import logging -import sys -import commentjson as json - -from . import shared -from . import presets - - -__all__ = [ - "my_api_key", - "authflag", - "auth_list", - "dockerflag", - "retrieve_proxy", - "log_level", - "advance_docs", - "update_doc_config", - "multi_api_key", - "server_name", - "server_port", - "share", -] - -# 添加一个统一的config文件,避免文件过多造成的疑惑(优先级最低) -# 同时,也可以为后续支持自定义功能提供config的帮助 -if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) -else: - config = {} - -lang_config = config.get("language", "auto") -language = os.environ.get("LANGUAGE", lang_config) - -if os.path.exists("api_key.txt"): - logging.info("检测到api_key.txt文件,正在进行迁移...") - with open("api_key.txt", "r") as f: - config["openai_api_key"] = f.read().strip() - os.rename("api_key.txt", "api_key(deprecated).txt") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4) - -if os.path.exists("auth.json"): - logging.info("检测到auth.json文件,正在进行迁移...") - auth_list = [] - with open("auth.json", "r", encoding='utf-8') as f: - auth = json.load(f) - for _ in auth: - if auth[_]["username"] and auth[_]["password"]: - auth_list.append((auth[_]["username"], auth[_]["password"])) - else: - logging.error("请检查auth.json文件中的用户名和密码!") - sys.exit(1) - config["users"] = auth_list - os.rename("auth.json", "auth(deprecated).json") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4) - -## 处理docker if we are running in Docker -dockerflag = config.get("dockerflag", False) -if os.environ.get("dockerrun") == "yes": - dockerflag = True - -## 处理 api-key 以及 允许的用户列表 -my_api_key = config.get("openai_api_key", "") -my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key) - -xmchat_api_key = config.get("xmchat_api_key", "") -if os.environ.get("XMCHAT_API_KEY", None) == None: - os.environ["XMCHAT_API_KEY"] = xmchat_api_key - -## 多账户机制 -multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制 -if multi_api_key: - api_key_list = config.get("api_key_list", []) - if len(api_key_list) 
== 0: - logging.error("多账号模式已开启,但api_key_list为空,请检查config.json") - sys.exit(1) - shared.state.set_api_key_queue(api_key_list) - -auth_list = config.get("users", []) # 实际上是使用者的列表 -authflag = len(auth_list) > 0 # 是否开启认证的状态值,改为判断auth_list长度 - -# 处理自定义的api_host,优先读环境变量的配置,如果存在则自动装配 -api_host = os.environ.get("api_host", config.get("api_host", "")) -if api_host: - shared.state.set_api_host(api_host) - -@contextmanager -def retrieve_openai_api(api_key = None): - old_api_key = os.environ.get("OPENAI_API_KEY", "") - if api_key is None: - os.environ["OPENAI_API_KEY"] = my_api_key - yield my_api_key - else: - os.environ["OPENAI_API_KEY"] = api_key - yield api_key - os.environ["OPENAI_API_KEY"] = old_api_key - -## 处理log -log_level = config.get("log_level", "INFO") -logging.basicConfig( - level=log_level, - format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - -## 处理代理: -http_proxy = config.get("http_proxy", "") -https_proxy = config.get("https_proxy", "") -http_proxy = os.environ.get("HTTP_PROXY", http_proxy) -https_proxy = os.environ.get("HTTPS_PROXY", https_proxy) - -# 重置系统变量,在不需要设置的时候不设置环境变量,以免引起全局代理报错 -os.environ["HTTP_PROXY"] = "" -os.environ["HTTPS_PROXY"] = "" - -local_embedding = config.get("local_embedding", False) # 是否使用本地embedding - -@contextmanager -def retrieve_proxy(proxy=None): - """ - 1, 如果proxy = NONE,设置环境变量,并返回最新设置的代理 - 2,如果proxy != NONE,更新当前的代理配置,但是不更新环境变量 - """ - global http_proxy, https_proxy - if proxy is not None: - http_proxy = proxy - https_proxy = proxy - yield http_proxy, https_proxy - else: - old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] - os.environ["HTTP_PROXY"] = http_proxy - os.environ["HTTPS_PROXY"] = https_proxy - yield http_proxy, https_proxy # return new proxy - - # return old proxy - os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var - - -## 处理advance docs -advance_docs = defaultdict(lambda: defaultdict(dict)) -advance_docs.update(config.get("advance_docs", {})) -def update_doc_config(two_column_pdf): - global advance_docs - advance_docs["pdf"]["two_column"] = two_column_pdf - - logging.info(f"更新后的文件参数为:{advance_docs}") - -## 处理gradio.launch参数 -server_name = config.get("server_name", None) -server_port = config.get("server_port", None) -if server_name is None: - if dockerflag: - server_name = "0.0.0.0" - else: - server_name = "127.0.0.1" -if server_port is None: - if dockerflag: - server_port = 7860 - -assert server_port is None or type(server_port) == int, "要求port设置为int类型" - -# 设置默认model -default_model = config.get("default_model", "") -try: - presets.DEFAULT_MODEL = presets.MODELS.index(default_model) -except ValueError: - pass - -share = config.get("share", False) diff --git a/spaces/sail/lorahub/redirect.py b/spaces/sail/lorahub/redirect.py deleted file mode 100644 index 80a711b69eed897803c437015b56bca5ce0d9ca8..0000000000000000000000000000000000000000 --- a/spaces/sail/lorahub/redirect.py +++ /dev/null @@ -1,128 +0,0 @@ -import streamlit as st -import io -import contextlib -import sys -import re - - -class _Redirect: - class IOStuff(io.StringIO): - def __init__(self, trigger, max_buffer, buffer_separator, regex, dup=None): - super().__init__() - self._trigger = trigger - self._max_buffer = max_buffer - self._buffer_separator = buffer_separator - self._regex = regex and re.compile(regex) - self._dup = dup - - def write(self, __s: str) -> int: - if self._max_buffer: - concatenated_len = super().tell() + len(__s) - if concatenated_len > self._max_buffer: - rest = self.get_filtered_output()[concatenated_len - 
self._max_buffer:] - if self._buffer_separator is not None: - rest = rest.split(self._buffer_separator, 1)[-1] - super().seek(0) - super().write(rest) - super().truncate(super().tell() + len(__s)) - res = super().write(__s) - if self._dup is not None: - self._dup.write(__s) - self._trigger(self.get_filtered_output()) - return res - - def get_filtered_output(self): - if self._regex is None or self._buffer_separator is None: - return self.getvalue() - - return self._buffer_separator.join(filter(self._regex.search, self.getvalue().split(self._buffer_separator))) - - def print_at_end(self): - self._trigger(self.get_filtered_output()) - - def __init__(self, stdout=None, stderr=False, format=None, to=None, max_buffer=None, buffer_separator='\n', - regex=None, duplicate_out=False): - self.io_args = {'trigger': self._write, 'max_buffer': max_buffer, 'buffer_separator': buffer_separator, - 'regex': regex} - self.redirections = [] - self.st = None - self.stderr = stderr is True - self.stdout = stdout is True or (stdout is None and not self.stderr) - self.format = format or 'code' - self.to = to - self.fun = None - self.duplicate_out = duplicate_out or None - self.active_nested = None - - if not self.stdout and not self.stderr: - raise ValueError("one of stdout or stderr must be True") - - if self.format not in ['text', 'markdown', 'latex', 'code', 'write']: - raise ValueError( - f"format need oneof the following: {', '.join(['text', 'markdown', 'latex', 'code', 'write'])}") - - if self.to and (not hasattr(self.to, 'text') or not hasattr(self.to, 'empty')): - raise ValueError(f"'to' is not a streamlit container object") - - def __enter__(self): - if self.st is not None: - if self.to is None: - if self.active_nested is None: - self.active_nested = self(format=self.format, max_buffer=self.io_args['max_buffer'], - buffer_separator=self.io_args['buffer_separator'], - regex=self.io_args['regex'], duplicate_out=self.duplicate_out) - return self.active_nested.__enter__() - else: - raise Exception("Already entered") - to = self.to or st - - # to.text(f"{'stdout and stderr' if self.stdout and self.stderr else 'stdout' if self.stdout else 'stderr'}" - # f"{' [' + self.io_args['regex'] + ']' if self.io_args['regex'] else ''}" - # f":") - self.st = to.empty() - self.fun = getattr(self.st, self.format) - - io_obj = None - - def redirect(to_duplicate): - nonlocal io_obj - io_obj = _Redirect.IOStuff(dup=self.duplicate_out and to_duplicate, **self.io_args) - redirection = contextlib.redirect_stdout(io_obj) - self.redirections.append((redirection, io_obj)) - redirection.__enter__() - - if self.stderr: - redirect(sys.stderr) - if self.stdout: - redirect(sys.stdout) - - return io_obj - - def __call__(self, to=None, format=None, max_buffer=None, buffer_separator='\n', regex=None, duplicate_out=False): - return _Redirect(self.stdout, self.stderr, format=format, to=to, max_buffer=max_buffer, - buffer_separator=buffer_separator, regex=regex, duplicate_out=duplicate_out) - - def __exit__(self, *exc): - if self.active_nested is not None: - nested = self.active_nested - if nested.active_nested is None: - self.active_nested = None - return nested.__exit__(*exc) - - res = None - for redirection, io_obj in reversed(self.redirections): - res = redirection.__exit__(*exc) - io_obj.print_at_end() - - self.redirections = [] - self.st = None - self.fun = None - return res - - def _write(self, data): - self.fun(data) - - -stdout = _Redirect() -stderr = _Redirect(stderr=True) -stdouterr = _Redirect(stdout=True, stderr=True) diff --git 
a/spaces/scedlatioru/img-to-music/example/Free Download Mazacam 25 ((NEW)).md b/spaces/scedlatioru/img-to-music/example/Free Download Mazacam 25 ((NEW)).md deleted file mode 100644 index 38fa8a63a7aaeb71a76063c903aeba721c6880b1..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Free Download Mazacam 25 ((NEW)).md +++ /dev/null @@ -1,9 +0,0 @@ - -

MazaCAM for Mazatrol and G-codes is an easy-to-use graphical data-entry tool that lets the user quickly produce pre-programmed G-code or Mazatrol programs. MazaCAM organizes all common Mazatrol configurations into easy-to-search folders, and searches these folders for MazaCAM templates that fit the Mazatrol configuration. For example, if a given Mazatrol configuration requires that a certain tool path be deleted after the tool is completed, the user simply provides a template for the configuration and clicks a button to generate the appropriate program. The programs are downloaded to a local library on the Mazatrol and are automatically uploaded to the Mazatrol's controller board and program memory.

            -

If a configuration is missing or needs to be uploaded, the program keeps looking through the Mazatrol's folders and template folders for the requested configuration. Once it finds the configuration, it automatically generates the program and downloads it directly to the Mazatrol's controller board, then uploads the file to the Mazatrol's program memory.
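Purely as an illustration of the template-driven workflow described above, here is a minimal Python sketch. The template syntax, function names, and G-code fields are hypothetical, invented for this example; MazaCAM's real formats are proprietary and not shown here.

```python
# Illustrative sketch only: a template-driven G-code generator in the spirit
# of the workflow described above. Field names and codes are hypothetical.
from string import Template

# A "template" maps a named configuration to parameterized G-code.
TEMPLATES = {
    "face_mill": Template(
        "G21 G90\n"             # metric units, absolute positioning
        "T$tool M06\n"          # select and change tool
        "S$rpm M03\n"           # spindle on, clockwise
        "G01 X$x Y$y F$feed\n"  # linear cut to the target point
        "M05\nM30\n"            # spindle off, end of program
    ),
}

def generate_program(config_name: str, **params) -> str:
    """Look up a template matching the configuration and fill in its fields."""
    return TEMPLATES[config_name].substitute(**params)

if __name__ == "__main__":
    # One click in the GUI would correspond to one call like this.
    print(generate_program("face_mill", tool=3, rpm=1200, x=50.0, y=25.0, feed=300))
```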

            -

            Free download mazacam 25


            Download File ---> https://gohhs.com/2uEz4L



            -

The program can also be used to download a digital image from the Mazatrol's photo library. For example, if the Mazatrol is supposed to be cutting wood, you can tell the program to download a picture of a piece of wood.

            -

The program starts by looking through the Mazatrol's photo library to find a suitable picture. Once the picture is located, the program tells the user what it is going to do and then starts the download. When the download is complete, the program reloads the image into the Mazatrol's photo library and tells the user that the image was loaded successfully.

            -

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/sczhou/ProPainter/inference_propainter.py b/spaces/sczhou/ProPainter/inference_propainter.py deleted file mode 100644 index c50ac89b97c1a4fbbd38486b3250832c1fee79c5..0000000000000000000000000000000000000000 --- a/spaces/sczhou/ProPainter/inference_propainter.py +++ /dev/null @@ -1,476 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import cv2 -import argparse -import imageio -import numpy as np -import scipy.ndimage -from PIL import Image -from tqdm import tqdm - -import torch -import torchvision - -from model.modules.flow_comp_raft import RAFT_bi -from model.recurrent_flow_completion import RecurrentFlowCompleteNet -from model.propainter import InpaintGenerator -from utils.download_util import load_file_from_url -from core.utils import to_tensors -from model.misc import get_device - -import warnings -warnings.filterwarnings("ignore") - -pretrain_model_url = 'https://github.com/sczhou/ProPainter/releases/download/v0.1.0/' - -def imwrite(img, file_path, params=None, auto_mkdir=True): - if auto_mkdir: - dir_name = os.path.abspath(os.path.dirname(file_path)) - os.makedirs(dir_name, exist_ok=True) - return cv2.imwrite(file_path, img, params) - - -# resize frames -def resize_frames(frames, size=None): - if size is not None: - out_size = size - process_size = (out_size[0]-out_size[0]%8, out_size[1]-out_size[1]%8) - frames = [f.resize(process_size) for f in frames] - else: - out_size = frames[0].size - process_size = (out_size[0]-out_size[0]%8, out_size[1]-out_size[1]%8) - if not out_size == process_size: - frames = [f.resize(process_size) for f in frames] - - return frames, process_size, out_size - - -# read frames from video -def read_frame_from_videos(frame_root): - if frame_root.endswith(('mp4', 'mov', 'avi', 'MP4', 'MOV', 'AVI')): # input video path - video_name = os.path.basename(frame_root)[:-4] - vframes, aframes, info = torchvision.io.read_video(filename=frame_root, pts_unit='sec') # RGB - frames = list(vframes.numpy()) - frames = [Image.fromarray(f) for f in frames] - fps = info['video_fps'] - else: - video_name = os.path.basename(frame_root) - frames = [] - fr_lst = sorted(os.listdir(frame_root)) - for fr in fr_lst: - frame = cv2.imread(os.path.join(frame_root, fr)) - frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) - frames.append(frame) - fps = None - size = frames[0].size - - return frames, fps, size, video_name - - -def binary_mask(mask, th=0.1): - mask[mask>th] = 1 - mask[mask<=th] = 0 - return mask - - -# read frame-wise masks -def read_mask(mpath, length, size, flow_mask_dilates=8, mask_dilates=5): - masks_img = [] - masks_dilated = [] - flow_masks = [] - - if mpath.endswith(('jpg', 'jpeg', 'png', 'JPG', 'JPEG', 'PNG')): # input single img path - masks_img = [Image.open(mpath)] - else: - mnames = sorted(os.listdir(mpath)) - for mp in mnames: - masks_img.append(Image.open(os.path.join(mpath, mp))) - - for mask_img in masks_img: - if size is not None: - mask_img = mask_img.resize(size, Image.NEAREST) - mask_img = np.array(mask_img.convert('L')) - - # Dilate 8 pixel so that all known pixel is trustworthy - if flow_mask_dilates > 0: - flow_mask_img = scipy.ndimage.binary_dilation(mask_img, iterations=flow_mask_dilates).astype(np.uint8) - else: - flow_mask_img = binary_mask(mask_img).astype(np.uint8) - # Close the small holes inside the foreground objects - # flow_mask_img = cv2.morphologyEx(flow_mask_img, cv2.MORPH_CLOSE, np.ones((21, 21),np.uint8)).astype(bool) - # flow_mask_img = 
scipy.ndimage.binary_fill_holes(flow_mask_img).astype(np.uint8) - flow_masks.append(Image.fromarray(flow_mask_img * 255)) - - if mask_dilates > 0: - mask_img = scipy.ndimage.binary_dilation(mask_img, iterations=mask_dilates).astype(np.uint8) - else: - mask_img = binary_mask(mask_img).astype(np.uint8) - masks_dilated.append(Image.fromarray(mask_img * 255)) - - if len(masks_img) == 1: - flow_masks = flow_masks * length - masks_dilated = masks_dilated * length - - return flow_masks, masks_dilated - - -def extrapolation(video_ori, scale): - """Prepares the data for video outpainting. - """ - nFrame = len(video_ori) - imgW, imgH = video_ori[0].size - - # Defines new FOV. - imgH_extr = int(scale[0] * imgH) - imgW_extr = int(scale[1] * imgW) - imgH_extr = imgH_extr - imgH_extr % 8 - imgW_extr = imgW_extr - imgW_extr % 8 - H_start = int((imgH_extr - imgH) / 2) - W_start = int((imgW_extr - imgW) / 2) - - # Extrapolates the FOV for video. - frames = [] - for v in video_ori: - frame = np.zeros(((imgH_extr, imgW_extr, 3)), dtype=np.uint8) - frame[H_start: H_start + imgH, W_start: W_start + imgW, :] = v - frames.append(Image.fromarray(frame)) - - # Generates the mask for missing region. - masks_dilated = [] - flow_masks = [] - - dilate_h = 4 if H_start > 10 else 0 - dilate_w = 4 if W_start > 10 else 0 - mask = np.ones(((imgH_extr, imgW_extr)), dtype=np.uint8) - - mask[H_start+dilate_h: H_start+imgH-dilate_h, - W_start+dilate_w: W_start+imgW-dilate_w] = 0 - flow_masks.append(Image.fromarray(mask * 255)) - - mask[H_start: H_start+imgH, W_start: W_start+imgW] = 0 - masks_dilated.append(Image.fromarray(mask * 255)) - - flow_masks = flow_masks * nFrame - masks_dilated = masks_dilated * nFrame - - return frames, flow_masks, masks_dilated, (imgW_extr, imgH_extr) - - -def get_ref_index(mid_neighbor_id, neighbor_ids, length, ref_stride=10, ref_num=-1): - ref_index = [] - if ref_num == -1: - for i in range(0, length, ref_stride): - if i not in neighbor_ids: - ref_index.append(i) - else: - start_idx = max(0, mid_neighbor_id - ref_stride * (ref_num // 2)) - end_idx = min(length, mid_neighbor_id + ref_stride * (ref_num // 2)) - for i in range(start_idx, end_idx, ref_stride): - if i not in neighbor_ids: - if len(ref_index) > ref_num: - break - ref_index.append(i) - return ref_index - - - -if __name__ == '__main__': - # device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - device = get_device() - - parser = argparse.ArgumentParser() - parser.add_argument( - '-i', '--video', type=str, default='inputs/object_removal/bmx-trees', help='Path of the input video or image folder.') - parser.add_argument( - '-m', '--mask', type=str, default='inputs/object_removal/bmx-trees_mask', help='Path of the mask(s) or mask folder.') - parser.add_argument( - '-o', '--output', type=str, default='results', help='Output folder. 
Default: results') - parser.add_argument( - "--resize_ratio", type=float, default=1.0, help='Resize scale for processing video.') - parser.add_argument( - '--height', type=int, default=-1, help='Height of the processing video.') - parser.add_argument( - '--width', type=int, default=-1, help='Width of the processing video.') - parser.add_argument( - '--mask_dilation', type=int, default=4, help='Mask dilation for video and flow masking.') - parser.add_argument( - "--ref_stride", type=int, default=10, help='Stride of global reference frames.') - parser.add_argument( - "--neighbor_length", type=int, default=10, help='Length of local neighboring frames.') - parser.add_argument( - "--subvideo_length", type=int, default=80, help='Length of sub-video for long video inference.') - parser.add_argument( - "--raft_iter", type=int, default=20, help='Iterations for RAFT inference.') - parser.add_argument( - '--mode', default='video_inpainting', choices=['video_inpainting', 'video_outpainting'], help="Modes: video_inpainting / video_outpainting") - parser.add_argument( - '--scale_h', type=float, default=1.0, help='Outpainting scale of height for video_outpainting mode.') - parser.add_argument( - '--scale_w', type=float, default=1.2, help='Outpainting scale of width for video_outpainting mode.') - parser.add_argument( - '--save_fps', type=int, default=24, help='Frame per second. Default: 24') - parser.add_argument( - '--save_frames', action='store_true', help='Save output frames. Default: False') - parser.add_argument( - '--fp16', action='store_true', help='Use fp16 (half precision) during inference. Default: fp32 (single precision).') - - args = parser.parse_args() - - # Use fp16 precision during inference to reduce running memory cost - use_half = True if args.fp16 else False - if device == torch.device('cpu'): - use_half = False - - frames, fps, size, video_name = read_frame_from_videos(args.video) - if not args.width == -1 and not args.height == -1: - size = (args.width, args.height) - if not args.resize_ratio == 1.0: - size = (int(args.resize_ratio * size[0]), int(args.resize_ratio * size[1])) - - frames, size, out_size = resize_frames(frames, size) - - fps = args.save_fps if fps is None else fps - save_root = os.path.join(args.output, video_name) - if not os.path.exists(save_root): - os.makedirs(save_root, exist_ok=True) - - if args.mode == 'video_inpainting': - frames_len = len(frames) - flow_masks, masks_dilated = read_mask(args.mask, frames_len, size, - flow_mask_dilates=args.mask_dilation, - mask_dilates=args.mask_dilation) - w, h = size - elif args.mode == 'video_outpainting': - assert args.scale_h is not None and args.scale_w is not None, 'Please provide a outpainting scale (s_h, s_w).' - frames, flow_masks, masks_dilated, size = extrapolation(frames, (args.scale_h, args.scale_w)) - w, h = size - else: - raise NotImplementedError - - # for saving the masked frames or video - masked_frame_for_save = [] - for i in range(len(frames)): - mask_ = np.expand_dims(np.array(masks_dilated[i]),2).repeat(3, axis=2)/255. 
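# Editor's note (annotation, not part of the deleted file): the statements
# below build a preview of the inpainting mask by alpha-compositing a green
# tint over the masked region. `fuse_img = (1 - alpha) * img + alpha * green`
# blends each frame with a solid green image, and
# `mask_ * fuse_img + (1 - mask_) * img` keeps that tint only where the
# dilated mask is 1, leaving unmasked pixels untouched.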
- img = np.array(frames[i]) - green = np.zeros([h, w, 3]) - green[:,:,1] = 255 - alpha = 0.6 - # alpha = 1.0 - fuse_img = (1-alpha)*img + alpha*green - fuse_img = mask_ * fuse_img + (1-mask_)*img - masked_frame_for_save.append(fuse_img.astype(np.uint8)) - - frames_inp = [np.array(f).astype(np.uint8) for f in frames] - frames = to_tensors()(frames).unsqueeze(0) * 2 - 1 - flow_masks = to_tensors()(flow_masks).unsqueeze(0) - masks_dilated = to_tensors()(masks_dilated).unsqueeze(0) - frames, flow_masks, masks_dilated = frames.to(device), flow_masks.to(device), masks_dilated.to(device) - - - ############################################## - # set up RAFT and flow competition model - ############################################## - ckpt_path = load_file_from_url(url=os.path.join(pretrain_model_url, 'raft-things.pth'), - model_dir='weights', progress=True, file_name=None) - fix_raft = RAFT_bi(ckpt_path, device) - - ckpt_path = load_file_from_url(url=os.path.join(pretrain_model_url, 'recurrent_flow_completion.pth'), - model_dir='weights', progress=True, file_name=None) - fix_flow_complete = RecurrentFlowCompleteNet(ckpt_path) - for p in fix_flow_complete.parameters(): - p.requires_grad = False - fix_flow_complete.to(device) - fix_flow_complete.eval() - - - ############################################## - # set up ProPainter model - ############################################## - ckpt_path = load_file_from_url(url=os.path.join(pretrain_model_url, 'ProPainter.pth'), - model_dir='weights', progress=True, file_name=None) - model = InpaintGenerator(model_path=ckpt_path).to(device) - model.eval() - - - ############################################## - # ProPainter inference - ############################################## - video_length = frames.size(1) - print(f'\nProcessing: {video_name} [{video_length} frames]...') - with torch.no_grad(): - # ---- compute flow ---- - if frames.size(-1) <= 640: - short_clip_len = 12 - elif frames.size(-1) <= 720: - short_clip_len = 8 - elif frames.size(-1) <= 1280: - short_clip_len = 4 - else: - short_clip_len = 2 - - # use fp32 for RAFT - if frames.size(1) > short_clip_len: - gt_flows_f_list, gt_flows_b_list = [], [] - for f in range(0, video_length, short_clip_len): - end_f = min(video_length, f + short_clip_len) - if f == 0: - flows_f, flows_b = fix_raft(frames[:,f:end_f], iters=args.raft_iter) - else: - flows_f, flows_b = fix_raft(frames[:,f-1:end_f], iters=args.raft_iter) - - gt_flows_f_list.append(flows_f) - gt_flows_b_list.append(flows_b) - torch.cuda.empty_cache() - - gt_flows_f = torch.cat(gt_flows_f_list, dim=1) - gt_flows_b = torch.cat(gt_flows_b_list, dim=1) - gt_flows_bi = (gt_flows_f, gt_flows_b) - else: - gt_flows_bi = fix_raft(frames, iters=args.raft_iter) - torch.cuda.empty_cache() - - - if use_half: - frames, flow_masks, masks_dilated = frames.half(), flow_masks.half(), masks_dilated.half() - gt_flows_bi = (gt_flows_bi[0].half(), gt_flows_bi[1].half()) - fix_flow_complete = fix_flow_complete.half() - model = model.half() - - - # ---- complete flow ---- - flow_length = gt_flows_bi[0].size(1) - if flow_length > args.subvideo_length: - pred_flows_f, pred_flows_b = [], [] - pad_len = 5 - for f in range(0, flow_length, args.subvideo_length): - s_f = max(0, f - pad_len) - e_f = min(flow_length, f + args.subvideo_length + pad_len) - pad_len_s = max(0, f) - s_f - pad_len_e = e_f - min(flow_length, f + args.subvideo_length) - pred_flows_bi_sub, _ = fix_flow_complete.forward_bidirect_flow( - (gt_flows_bi[0][:, s_f:e_f], gt_flows_bi[1][:, s_f:e_f]), - 
flow_masks[:, s_f:e_f+1]) - pred_flows_bi_sub = fix_flow_complete.combine_flow( - (gt_flows_bi[0][:, s_f:e_f], gt_flows_bi[1][:, s_f:e_f]), - pred_flows_bi_sub, - flow_masks[:, s_f:e_f+1]) - - pred_flows_f.append(pred_flows_bi_sub[0][:, pad_len_s:e_f-s_f-pad_len_e]) - pred_flows_b.append(pred_flows_bi_sub[1][:, pad_len_s:e_f-s_f-pad_len_e]) - torch.cuda.empty_cache() - - pred_flows_f = torch.cat(pred_flows_f, dim=1) - pred_flows_b = torch.cat(pred_flows_b, dim=1) - pred_flows_bi = (pred_flows_f, pred_flows_b) - else: - pred_flows_bi, _ = fix_flow_complete.forward_bidirect_flow(gt_flows_bi, flow_masks) - pred_flows_bi = fix_flow_complete.combine_flow(gt_flows_bi, pred_flows_bi, flow_masks) - torch.cuda.empty_cache() - - - # ---- image propagation ---- - masked_frames = frames * (1 - masks_dilated) - subvideo_length_img_prop = min(100, args.subvideo_length) # ensure a minimum of 100 frames for image propagation - if video_length > subvideo_length_img_prop: - updated_frames, updated_masks = [], [] - pad_len = 10 - for f in range(0, video_length, subvideo_length_img_prop): - s_f = max(0, f - pad_len) - e_f = min(video_length, f + subvideo_length_img_prop + pad_len) - pad_len_s = max(0, f) - s_f - pad_len_e = e_f - min(video_length, f + subvideo_length_img_prop) - - b, t, _, _, _ = masks_dilated[:, s_f:e_f].size() - pred_flows_bi_sub = (pred_flows_bi[0][:, s_f:e_f-1], pred_flows_bi[1][:, s_f:e_f-1]) - prop_imgs_sub, updated_local_masks_sub = model.img_propagation(masked_frames[:, s_f:e_f], - pred_flows_bi_sub, - masks_dilated[:, s_f:e_f], - 'nearest') - updated_frames_sub = frames[:, s_f:e_f] * (1 - masks_dilated[:, s_f:e_f]) + \ - prop_imgs_sub.view(b, t, 3, h, w) * masks_dilated[:, s_f:e_f] - updated_masks_sub = updated_local_masks_sub.view(b, t, 1, h, w) - - updated_frames.append(updated_frames_sub[:, pad_len_s:e_f-s_f-pad_len_e]) - updated_masks.append(updated_masks_sub[:, pad_len_s:e_f-s_f-pad_len_e]) - torch.cuda.empty_cache() - - updated_frames = torch.cat(updated_frames, dim=1) - updated_masks = torch.cat(updated_masks, dim=1) - else: - b, t, _, _, _ = masks_dilated.size() - prop_imgs, updated_local_masks = model.img_propagation(masked_frames, pred_flows_bi, masks_dilated, 'nearest') - updated_frames = frames * (1 - masks_dilated) + prop_imgs.view(b, t, 3, h, w) * masks_dilated - updated_masks = updated_local_masks.view(b, t, 1, h, w) - torch.cuda.empty_cache() - - - ori_frames = frames_inp - comp_frames = [None] * video_length - - neighbor_stride = args.neighbor_length // 2 - if video_length > args.subvideo_length: - ref_num = args.subvideo_length // args.ref_stride - else: - ref_num = -1 - - # ---- feature propagation + transformer ---- - for f in tqdm(range(0, video_length, neighbor_stride)): - neighbor_ids = [ - i for i in range(max(0, f - neighbor_stride), - min(video_length, f + neighbor_stride + 1)) - ] - ref_ids = get_ref_index(f, neighbor_ids, video_length, args.ref_stride, ref_num) - selected_imgs = updated_frames[:, neighbor_ids + ref_ids, :, :, :] - selected_masks = masks_dilated[:, neighbor_ids + ref_ids, :, :, :] - selected_update_masks = updated_masks[:, neighbor_ids + ref_ids, :, :, :] - selected_pred_flows_bi = (pred_flows_bi[0][:, neighbor_ids[:-1], :, :, :], pred_flows_bi[1][:, neighbor_ids[:-1], :, :, :]) - - with torch.no_grad(): - # 1.0 indicates mask - l_t = len(neighbor_ids) - - # pred_img = selected_imgs # results of image propagation - pred_img = model(selected_imgs, selected_pred_flows_bi, selected_masks, selected_update_masks, l_t) - - pred_img = 
pred_img.view(-1, 3, h, w) - - pred_img = (pred_img + 1) / 2 - pred_img = pred_img.cpu().permute(0, 2, 3, 1).numpy() * 255 - binary_masks = masks_dilated[0, neighbor_ids, :, :, :].cpu().permute( - 0, 2, 3, 1).numpy().astype(np.uint8) - for i in range(len(neighbor_ids)): - idx = neighbor_ids[i] - img = np.array(pred_img[i]).astype(np.uint8) * binary_masks[i] \ - + ori_frames[idx] * (1 - binary_masks[i]) - if comp_frames[idx] is None: - comp_frames[idx] = img - else: - comp_frames[idx] = comp_frames[idx].astype(np.float32) * 0.5 + img.astype(np.float32) * 0.5 - - comp_frames[idx] = comp_frames[idx].astype(np.uint8) - - torch.cuda.empty_cache() - - # save each frame - if args.save_frames: - for idx in range(video_length): - f = comp_frames[idx] - f = cv2.resize(f, out_size, interpolation = cv2.INTER_CUBIC) - f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB) - img_save_root = os.path.join(save_root, 'frames', str(idx).zfill(4)+'.png') - imwrite(f, img_save_root) - - - # if args.mode == 'video_outpainting': - # comp_frames = [i[10:-10,10:-10] for i in comp_frames] - # masked_frame_for_save = [i[10:-10,10:-10] for i in masked_frame_for_save] - - # save videos frame - masked_frame_for_save = [cv2.resize(f, out_size) for f in masked_frame_for_save] - comp_frames = [cv2.resize(f, out_size) for f in comp_frames] - imageio.mimwrite(os.path.join(save_root, 'masked_in.mp4'), masked_frame_for_save, fps=fps, quality=7) - imageio.mimwrite(os.path.join(save_root, 'inpaint_out.mp4'), comp_frames, fps=fps, quality=7) - - print(f'\nAll results are saved in {save_root}') - - torch.cuda.empty_cache() \ No newline at end of file diff --git a/spaces/sczhou/ProPainter/web-demos/hugging_face/tracker/model/modules.py b/spaces/sczhou/ProPainter/web-demos/hugging_face/tracker/model/modules.py deleted file mode 100644 index 59c4170af5abfecf7b85ed7804fc390285e0194d..0000000000000000000000000000000000000000 --- a/spaces/sczhou/ProPainter/web-demos/hugging_face/tracker/model/modules.py +++ /dev/null @@ -1,85 +0,0 @@ -from typing import List, Iterable -import torch -import torch.nn as nn - -from tracker.model.group_modules import * - - -class MaskUpsampleBlock(nn.Module): - def __init__(self, in_dim: int, out_dim: int, scale_factor: int = 2): - super().__init__() - self.distributor = MainToGroupDistributor(method='add') - self.out_conv = GroupResBlock(in_dim, out_dim) - self.scale_factor = scale_factor - - def forward(self, in_g: torch.Tensor, skip_f: torch.Tensor) -> torch.Tensor: - g = upsample_groups(in_g, ratio=self.scale_factor) - g = self.distributor(skip_f, g) - g = self.out_conv(g) - return g - - -class DecoderFeatureProcessor(nn.Module): - def __init__(self, decoder_dims: List[int], out_dims: List[int]): - super().__init__() - self.transforms = nn.ModuleList([ - nn.Conv2d(d_dim, p_dim, kernel_size=1) for d_dim, p_dim in zip(decoder_dims, out_dims) - ]) - - def forward(self, multi_scale_features: Iterable[torch.Tensor]) -> List[torch.Tensor]: - outputs = [func(x) for x, func in zip(multi_scale_features, self.transforms)] - return outputs - - -# @torch.jit.script -def _recurrent_update(h: torch.Tensor, values: torch.Tensor) -> torch.Tensor: - # h: batch_size * num_objects * hidden_dim * h * w - # values: batch_size * num_objects * (hidden_dim*3) * h * w - dim = values.shape[2] // 3 - forget_gate = torch.sigmoid(values[:, :, :dim]) - update_gate = torch.sigmoid(values[:, :, dim:dim * 2]) - new_value = torch.tanh(values[:, :, dim * 2:]) - new_h = forget_gate * h * (1 - update_gate) + update_gate * new_value - return new_h - - 
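# Editor's note (annotation, not part of the deleted file): _recurrent_update
# above is a GRU-style gated update in which `values` packs
# [forget_gate | update_gate | candidate] along dim 2. A self-contained
# sketch of the same arithmetic on dummy tensors:
#
#     import torch
#     h = torch.zeros(1, 2, 4, 8, 8)        # batch, num_objects, hidden_dim, H, W
#     values = torch.randn(1, 2, 12, 8, 8)  # hidden_dim * 3 channels
#     dim = values.shape[2] // 3
#     forget_gate = torch.sigmoid(values[:, :, :dim])
#     update_gate = torch.sigmoid(values[:, :, dim:dim * 2])
#     new_value = torch.tanh(values[:, :, dim * 2:])
#     new_h = forget_gate * h * (1 - update_gate) + update_gate * new_value
#     assert new_h.shape == h.shape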
-class SensoryUpdater(nn.Module): - # Used in the decoder, multi-scale feature + GRU - def __init__(self, g_dims: List[int], mid_dim: int, sensory_dim: int): - super().__init__() - self.g16_conv = GConv2d(g_dims[0], mid_dim, kernel_size=1) - self.g8_conv = GConv2d(g_dims[1], mid_dim, kernel_size=1) - self.g4_conv = GConv2d(g_dims[2], mid_dim, kernel_size=1) - - self.transform = GConv2d(mid_dim + sensory_dim, sensory_dim * 3, kernel_size=3, padding=1) - - nn.init.xavier_normal_(self.transform.weight) - - def forward(self, g: torch.Tensor, h: torch.Tensor) -> torch.Tensor: - g = self.g16_conv(g[0]) + self.g8_conv(downsample_groups(g[1], ratio=1/2)) + \ - self.g4_conv(downsample_groups(g[2], ratio=1/4)) - - with torch.cuda.amp.autocast(enabled=False): - g = g.float() - h = h.float() - values = self.transform(torch.cat([g, h], dim=2)) - new_h = _recurrent_update(h, values) - - return new_h - - -class SensoryDeepUpdater(nn.Module): - def __init__(self, f_dim: int, sensory_dim: int): - super().__init__() - self.transform = GConv2d(f_dim + sensory_dim, sensory_dim * 3, kernel_size=3, padding=1) - - nn.init.xavier_normal_(self.transform.weight) - - def forward(self, g: torch.Tensor, h: torch.Tensor) -> torch.Tensor: - with torch.cuda.amp.autocast(enabled=False): - g = g.float() - h = h.float() - values = self.transform(torch.cat([g, h], dim=2)) - new_h = _recurrent_update(h, values) - - return new_h diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transducer/__init__.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transducer/__init__.py deleted file mode 100644 index b7f177368e62a5578b8706300e101f831a3972ac..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transducer/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Initialize sub package.""" diff --git a/spaces/segments-tobias/conex/espnet/optimizer/pytorch.py b/spaces/segments-tobias/conex/espnet/optimizer/pytorch.py deleted file mode 100644 index 7914e36b999b50de79e3ed666dfcfa60ef8265a1..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/optimizer/pytorch.py +++ /dev/null @@ -1,93 +0,0 @@ -"""PyTorch optimizer builders.""" -import argparse - -import torch - -from espnet.optimizer.factory import OptimizerFactoryInterface -from espnet.optimizer.parser import adadelta -from espnet.optimizer.parser import adam -from espnet.optimizer.parser import sgd - - -class AdamFactory(OptimizerFactoryInterface): - """Adam factory.""" - - @staticmethod - def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Register args.""" - return adam(parser) - - @staticmethod - def from_args(target, args: argparse.Namespace): - """Initialize optimizer from argparse Namespace. - - Args: - target: for pytorch `model.parameters()`, - for chainer `model` - args (argparse.Namespace): parsed command-line args - - """ - return torch.optim.Adam( - target, - lr=args.lr, - weight_decay=args.weight_decay, - betas=(args.beta1, args.beta2), - ) - - -class SGDFactory(OptimizerFactoryInterface): - """SGD factory.""" - - @staticmethod - def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Register args.""" - return sgd(parser) - - @staticmethod - def from_args(target, args: argparse.Namespace): - """Initialize optimizer from argparse Namespace. 
- - Args: - target: for pytorch `model.parameters()`, - for chainer `model` - args (argparse.Namespace): parsed command-line args - - """ - return torch.optim.SGD( - target, - lr=args.lr, - weight_decay=args.weight_decay, - ) - - -class AdadeltaFactory(OptimizerFactoryInterface): - """Adadelta factory.""" - - @staticmethod - def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Register args.""" - return adadelta(parser) - - @staticmethod - def from_args(target, args: argparse.Namespace): - """Initialize optimizer from argparse Namespace. - - Args: - target: for pytorch `model.parameters()`, - for chainer `model` - args (argparse.Namespace): parsed command-line args - - """ - return torch.optim.Adadelta( - target, - rho=args.rho, - eps=args.eps, - weight_decay=args.weight_decay, - ) - - -OPTIMIZER_FACTORY_DICT = { - "adam": AdamFactory, - "sgd": SGDFactory, - "adadelta": AdadeltaFactory, -} diff --git a/spaces/sgxz/bingo/next.config.js b/spaces/sgxz/bingo/next.config.js deleted file mode 100644 index 0e6ccd7fbc91d0459eaaff3e968ce0556789c605..0000000000000000000000000000000000000000 --- a/spaces/sgxz/bingo/next.config.js +++ /dev/null @@ -1,38 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - // output: 'export', - // assetPrefix: '.', - webpack: (config, { isServer }) => { - if (!isServer) { - config.resolve = { - ...config.resolve, - fallback: { - 'bufferutil': false, - 'utf-8-validate': false, - http: false, - https: false, - stream: false, - // fixes proxy-agent dependencies - net: false, - dns: false, - tls: false, - assert: false, - // fixes next-i18next dependencies - path: false, - fs: false, - // fixes mapbox dependencies - events: false, - // fixes sentry dependencies - process: false - } - }; - } - config.module.exprContextCritical = false; - - return config; - }, -} - -module.exports = (...args) => { - return nextConfig -} diff --git a/spaces/shivammehta25/Matcha-TTS/app.py b/spaces/shivammehta25/Matcha-TTS/app.py deleted file mode 100644 index e8308a0f5a2e9d83455f84f125d96dd8647b7ac6..0000000000000000000000000000000000000000 --- a/spaces/shivammehta25/Matcha-TTS/app.py +++ /dev/null @@ -1,326 +0,0 @@ -import tempfile -from argparse import Namespace -from pathlib import Path - -import gradio as gr -import soundfile as sf -import torch -from matcha.cli import (MATCHA_URLS, VOCODER_URLS, assert_model_downloaded, - get_device, load_matcha, load_vocoder, process_text, - to_waveform) -from matcha.utils.utils import get_user_data_dir, plot_tensor - -LOCATION = Path(get_user_data_dir()) - -args = Namespace( - cpu=False, - model="matcha_ljspeech", - vocoder="hifigan_T2_v1", - spk=0, -) - - -MATCHA_TTS_LOC = lambda x: LOCATION / f"{x}.ckpt" # noqa: E731 -VOCODER_LOC = lambda x: LOCATION / f"{x}" # noqa: E731 -LOGO_URL = "https://shivammehta25.github.io/Matcha-TTS/images/logo.png" -RADIO_OPTIONS = { - "Multi Speaker (VCTK)": { - "model": "matcha_vctk", - "vocoder": "hifigan_univ_v1", - }, - "Single Speaker (LJ Speech)": { - "model": "matcha_ljspeech", - "vocoder": "hifigan_T2_v1", - }, -} - -# Ensure all the required models are downloaded -assert_model_downloaded(MATCHA_TTS_LOC("matcha_ljspeech"), MATCHA_URLS["matcha_ljspeech"]) -assert_model_downloaded(VOCODER_LOC("hifigan_T2_v1"), VOCODER_URLS["hifigan_T2_v1"]) -assert_model_downloaded(MATCHA_TTS_LOC("matcha_vctk"), MATCHA_URLS["matcha_vctk"]) -assert_model_downloaded(VOCODER_LOC("hifigan_univ_v1"), VOCODER_URLS["hifigan_univ_v1"]) - -# get device -device = get_device(args) - -# Load 
default models -matcha_ljspeech = load_matcha(args.model, MATCHA_TTS_LOC(args.model), device) -hifigan_T2_v1, hifigan_T2_v1_denoiser = load_vocoder(args.vocoder, VOCODER_LOC(args.vocoder), device) - -matcha_vctk = load_matcha("matcha_vctk", MATCHA_TTS_LOC("matcha_vctk"), device) -hifigan_univ_v1, hifigan_univ_v1_denoiser = load_vocoder("hifigan_univ_v1", VOCODER_LOC("hifigan_univ_v1"), device) - - - -def load_model_ui(model_type, textbox): - model_name = RADIO_OPTIONS[model_type]["model"] - - if model_name == "matcha_ljspeech": - spk_slider = gr.update(visible=False, value=-1) - single_speaker_examples = gr.update(visible=True) - multi_speaker_examples = gr.update(visible=False) - length_scale = gr.update(value=0.95) - else: - spk_slider = gr.update(visible=True, value=0) - single_speaker_examples = gr.update(visible=False) - multi_speaker_examples = gr.update(visible=True) - length_scale = gr.update(value=0.85) - - return textbox, gr.update(interactive=True), spk_slider, single_speaker_examples, multi_speaker_examples, length_scale - - -@torch.inference_mode() -def process_text_gradio(text): - output = process_text(1, text, device) - return output["x_phones"][1::2], output["x"], output["x_lengths"] - - -@torch.inference_mode() -def synthesise_mel(text, text_length, n_timesteps, temperature, length_scale, spk): - spk = torch.tensor([spk], device=device, dtype=torch.long) if spk >= 0 else None - - if spk is None: - output = matcha_ljspeech.synthesise( - text, - text_length, - n_timesteps=n_timesteps, - temperature=temperature, - spks=None, - length_scale=length_scale, - ) - output["waveform"] = to_waveform(output["mel"], hifigan_T2_v1, hifigan_T2_v1_denoiser) - else: - output = matcha_vctk.synthesise( - text, - text_length, - n_timesteps=n_timesteps, - temperature=temperature, - spks=spk, - length_scale=length_scale, - ) - output["waveform"] = to_waveform(output["mel"], hifigan_univ_v1, hifigan_univ_v1_denoiser) - - with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp: - sf.write(fp.name, output["waveform"], 22050, "PCM_24") - - return fp.name, plot_tensor(output["mel"].squeeze().cpu().numpy()) - - -def multispeaker_example_cacher(text, n_timesteps, mel_temp, length_scale, spk): - phones, text, text_lengths = process_text_gradio(text) - audio, mel_spectrogram = synthesise_mel(text, text_lengths, n_timesteps, mel_temp, length_scale, spk) - return phones, audio, mel_spectrogram - - -def ljspeech_example_cacher(text, n_timesteps, mel_temp, length_scale, spk=-1): - phones, text, text_lengths = process_text_gradio(text) - audio, mel_spectrogram = synthesise_mel(text, text_lengths, n_timesteps, mel_temp, length_scale, spk) - return phones, audio, mel_spectrogram - - -description = """# 🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching -### [Shivam Mehta](https://www.kth.se/profile/smehta), [Ruibo Tu](https://www.kth.se/profile/ruibo), [Jonas Beskow](https://www.kth.se/profile/beskow), [Éva Székely](https://www.kth.se/profile/szekely), and [Gustav Eje Henter](https://people.kth.se/~ghe/) -We propose 🍵 Matcha-TTS, a new approach to non-autoregressive neural TTS, that uses conditional flow matching (similar to rectified flows) to speed up ODE-based speech synthesis. Our method: - - -* Is probabilistic -* Has compact memory footprint -* Sounds highly natural -* Is very fast to synthesise from - - -Check out our [demo page](https://shivammehta25.github.io/Matcha-TTS). Read our [arXiv preprint for more details](https://arxiv.org/abs/2309.03199). 
-Code is available in our [GitHub repository](https://github.com/shivammehta25/Matcha-TTS), along with pre-trained models. - -Cached examples are available at the bottom of the page. - -Note: Synthesis speed may be slower than in our paper due to I/O latency and because this instance runs on CPUs. -""" - -with gr.Blocks(title="🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching") as demo: - processed_text = gr.State(value=None) - processed_text_len = gr.State(value=None) - - with gr.Box(): - with gr.Row(): - gr.Markdown(description, scale=3) - with gr.Column(): - gr.Image(LOGO_URL, label="Matcha-TTS logo", height=50, width=50, scale=1, show_label=False) - html = '
            ' - gr.HTML(html) - - with gr.Box(): - radio_options = list(RADIO_OPTIONS.keys()) - model_type = gr.Radio( - radio_options, value=radio_options[0], label="Choose a Model", interactive=True, container=False - ) - - with gr.Row(): - gr.Markdown("# Text Input") - with gr.Row(): - text = gr.Textbox(value="", lines=2, label="Text to synthesise", scale=3) - spk_slider = gr.Slider( - minimum=0, maximum=107, step=1, value=args.spk, label="Speaker ID", interactive=True, scale=1 - ) - - with gr.Row(): - gr.Markdown("### Hyper parameters") - with gr.Row(): - n_timesteps = gr.Slider( - label="Number of ODE steps", - minimum=1, - maximum=100, - step=1, - value=10, - interactive=True, - ) - length_scale = gr.Slider( - label="Length scale (Speaking rate)", - minimum=0.5, - maximum=1.5, - step=0.05, - value=0.85, - interactive=True, - ) - mel_temp = gr.Slider( - label="Sampling temperature", - minimum=0.00, - maximum=2.001, - step=0.16675, - value=0.667, - interactive=True, - ) - - synth_btn = gr.Button("Synthesise") - - with gr.Box(): - with gr.Row(): - gr.Markdown("### Phonetised text") - phonetised_text = gr.Textbox(interactive=False, scale=10, label="Phonetised text") - - with gr.Box(): - with gr.Row(): - mel_spectrogram = gr.Image(interactive=False, label="mel spectrogram") - - # with gr.Row(): - audio = gr.Audio(interactive=False, label="Audio") - - with gr.Row(visible=False) as example_row_lj_speech: - examples = gr.Examples( # pylint: disable=unused-variable - examples=[ - [ - "We propose Matcha-TTS, a new approach to non-autoregressive neural TTS, that uses conditional flow matching (similar to rectified flows) to speed up O D E-based speech synthesis.", - 50, - 0.677, - 0.95, - ], - [ - "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.", - 2, - 0.677, - 0.95, - ], - [ - "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.", - 4, - 0.677, - 0.95, - ], - [ - "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.", - 10, - 0.677, - 0.95, - ], - [ - "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.", - 50, - 0.677, - 0.95, - ], - [ - "The narrative of these events is based largely on the recollections of the participants.", - 10, - 0.677, - 0.95, - ], - [ - "The jury did not believe him, and the verdict was for the defendants.", - 10, - 0.677, - 0.95, - ], - ], - fn=ljspeech_example_cacher, - inputs=[text, n_timesteps, mel_temp, length_scale], - outputs=[phonetised_text, audio, mel_spectrogram], - cache_examples=True, - ) - - with gr.Row() as example_row_multispeaker: - multi_speaker_examples = gr.Examples( # pylint: disable=unused-variable - examples=[ - [ - "Hello everyone! I am speaker 0 and I am here to tell you that Matcha-TTS is amazing!", - 10, - 0.677, - 0.85, - 0, - ], - [ - "Hello everyone! I am speaker 16 and I am here to tell you that Matcha-TTS is amazing!", - 10, - 0.677, - 0.85, - 16, - ], - [ - "Hello everyone! I am speaker 44 and I am here to tell you that Matcha-TTS is amazing!", - 50, - 0.677, - 0.85, - 44, - ], - [ - "Hello everyone! I am speaker 45 and I am here to tell you that Matcha-TTS is amazing!", - 50, - 0.677, - 0.85, - 45, - ], - [ - "Hello everyone! 
I am speaker 58 and I am here to tell you that Matcha-TTS is amazing!", - 4, - 0.677, - 0.85, - 58, - ], - ], - fn=multispeaker_example_cacher, - inputs=[text, n_timesteps, mel_temp, length_scale, spk_slider], - outputs=[phonetised_text, audio, mel_spectrogram], - cache_examples=True, - label="Multi Speaker Examples", - ) - - model_type.change(lambda x: gr.update(interactive=False), inputs=[synth_btn], outputs=[synth_btn]).then( - load_model_ui, - inputs=[model_type, text], - outputs=[text, synth_btn, spk_slider, example_row_lj_speech, example_row_multispeaker, length_scale], - ) - - synth_btn.click( - fn=process_text_gradio, - inputs=[ - text, - ], - outputs=[phonetised_text, processed_text, processed_text_len], - api_name="matcha_tts", - queue=True, - ).then( - fn=synthesise_mel, - inputs=[processed_text, processed_text_len, n_timesteps, mel_temp, length_scale, spk_slider], - outputs=[audio, mel_spectrogram], - ) - - demo.queue(concurrency_count=5).launch() diff --git a/spaces/silencewing/server/youyou/.history/math_20230613230609.html b/spaces/silencewing/server/youyou/.history/math_20230613230609.html deleted file mode 100644 index 256be5436c1d83b7d96388ab84360edc77112124..0000000000000000000000000000000000000000 --- a/spaces/silencewing/server/youyou/.history/math_20230613230609.html +++ /dev/null @@ -1,224 +0,0 @@ - - - - - - - - - - Document - - - - -
            - - - - - - - - - - - - - - - - - - - - - - - - -
            题目
            答案
            正误
            得分
            -
            - - - - diff --git a/spaces/silencewing/server/youyou/.history/math_20230613230822.html b/spaces/silencewing/server/youyou/.history/math_20230613230822.html deleted file mode 100644 index f6f0526a73ee4d0d7ce9ca190329ea467da70a41..0000000000000000000000000000000000000000 --- a/spaces/silencewing/server/youyou/.history/math_20230613230822.html +++ /dev/null @@ -1,227 +0,0 @@ - - - - - - - - - - Document - - - - -
            - - - - - - - - - - - - - - - - - - - - - - - - -
            题目
            答案
            正误
            得分
            -
            - - - - diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Black Iron Stickman Rope Hero MOD APK The Ultimate Simulation Game with Infinite Cash.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Black Iron Stickman Rope Hero MOD APK The Ultimate Simulation Game with Infinite Cash.md deleted file mode 100644 index 637d752f1af1bbc9ee4ee70c924cf26743c1db33..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Black Iron Stickman Rope Hero MOD APK The Ultimate Simulation Game with Infinite Cash.md +++ /dev/null @@ -1,171 +0,0 @@ - -

            Black Iron Stickman Rope Hero Mod APK: A Fun and Action-Packed Game for Android Users

            -

If you are looking for a game that combines superhero action, stickman graphics, and open-world exploration, then you should check out Black Iron Stickman Rope Hero Mod APK. This is a modified version of the original Stickman Rope Hero game by Naxeex Action & RPG Games, which gives you unlimited money, weapons, and vehicles to enjoy the game without any restrictions. In this article, we will tell you what Black Iron Stickman Rope Hero Mod APK is, how to download and install it, why you should play it, and what some alternatives to it are.

            -

            black iron stickman rope hero mod apk


            Download ✒ ✒ ✒ https://ssurll.com/2uNQUN



            -

            What is Black Iron Stickman Rope Hero Mod APK?

            -

            A modified version of the original Stickman Rope Hero game

            -

            Black Iron Stickman Rope Hero Mod APK is a game that lets you play as a stickman superhero who can use a rope to swing through the city and fight against evil clones. The game has amazing graphics, a big 3D city to explore, and various challenges and missions to complete. You can customize your stickman with different outfits and accessories, use melee weapons, guns, and vehicles to beat up your enemies, and collect coins, crystals, and other resources to upgrade your skills and abilities.

            -

            Features of the mod apk

            -

The mod apk is a modified version of the original game that gives you extra features not available in the standard release. Some of these features are:

            -
              -
            • Unlimited money: You can buy anything you want in the game without worrying about running out of money.
            • -
            • Unlimited weapons: You can access all the weapons in the game, including melee weapons, guns, and heavy weapons.
            • -
            • Unlimited vehicles: You can drive any vehicle in the game, including cars, bikes, helicopters, and tanks.
            • -
            • No ads: You can enjoy the game without any annoying ads interrupting your gameplay.
            • -
            -

            How to Download and Install Black Iron Stickman Rope Hero Mod APK?

            -

            Steps to download and install the mod apk

            -

            To download and install Black Iron Stickman Rope Hero Mod APK, you need to follow these steps:

            -
              -
            1. Go to [1](https://apkdone.com/stickman-rope-hero/) and click on the download button to get the mod apk file.
            2. -
            3. Allow unknown sources on your device by going to Settings > Security > Unknown Sources.
            4. -
5. Locate the downloaded file in your file manager and tap on it to install it (or install from a computer over ADB, as sketched just after this list).
            6. -
            7. Launch the game and enjoy playing with unlimited money, weapons, and vehicles.
            8. -
            -
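For readers who prefer to sideload from a computer rather than on the phone itself, the same install can be done over ADB. The following is a minimal illustrative sketch (not from the game or apkdone); it assumes the Android platform-tools are installed, USB debugging is enabled on the device, and the apk file name is a placeholder:

```python
# Illustrative only: sideload an APK over ADB from a computer.
# Assumes `adb` (Android platform-tools) is on PATH and USB debugging is on.
import subprocess

APK_PATH = "stickman-rope-hero-mod.apk"  # placeholder file name

def adb_install(apk_path: str) -> None:
    # `-r` reinstalls over an existing copy, keeping its app data.
    result = subprocess.run(
        ["adb", "install", "-r", apk_path],
        capture_output=True,
        text=True,
        check=False,
    )
    print(result.stdout or result.stderr)

if __name__ == "__main__":
    adb_install(APK_PATH)
```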

            Tips and tricks to play the game

            -

            To play Black Iron Stickman Rope Hero Mod APK effectively, you need to know some tips and tricks that will help you master the game. Here are some of them:

            -
              -
            • Use your rope wisely: Your rope is your main tool for moving around the city. You can use it to swing from buildings, climb walls, or grab objects. You can also use it to attack enemies by wrapping them with your rope or throwing them away.
            • -
            • Choose your weapons carefully: Depending on your situation, you may need different weapons to deal with different enemies. For example, melee weapons are good for close combat, guns are good for shooting from a distance, and heavy weapons are good for destroying vehicles or groups of enemies.
            • -
            • Upgrade your skills and abilities: As you play the game, you will earn coins, crystals, and other resources that you can use to upgrade your skills and abilities. You can improve your health, stamina, damage, speed, and other attributes to become more powerful and efficient.
            • -
            • Explore the city and complete missions: The game has a big 3D city that you can explore and find hidden items, secrets, and easter eggs. You can also complete various missions and challenges that will reward you with more money, weapons, and vehicles. Some of the missions include saving civilians, fighting gangs, destroying clones, and more.
            • -
            -

Why Should You Play Black Iron Stickman Rope Hero Mod APK?

            -

            The benefits of playing the mod apk

            -

            Playing Black Iron Stickman Rope Hero Mod APK has many benefits that will make you enjoy the game more. Some of these benefits are:

            -
              -
            • You can play the game without any limitations: With the mod apk, you can access all the features of the game without spending any real money or watching any ads. You can buy anything you want, use any weapon you like, and drive any vehicle you prefer.
            • -
            • You can have more fun and excitement: With the mod apk, you can experience more fun and excitement in the game. You can explore the city with more freedom, fight against more enemies, and complete more missions. You can also customize your stickman with different outfits and accessories to suit your style.
            • -
            • You can improve your skills and creativity: With the mod apk, you can improve your skills and creativity in the game. You can learn how to use your rope effectively, how to choose your weapons wisely, and how to upgrade your skills and abilities. You can also create your own scenarios and challenges in the game by using your imagination.
            • -
            -

            The challenges and fun of playing the mod apk

            -

Playing Black Iron Stickman Rope Hero Mod APK also comes with challenges and fun moments that will keep you hooked to the game. Some of them are:

            -
              -
            • You can face more dangers and enemies: With the mod apk, you can face more dangers and enemies in the game. You can encounter more clones that will try to stop you, more gangs that will try to attack you, and more vehicles that will try to chase you. You can also face more difficult missions and challenges that will test your skills and abilities.
            • -
            • You can enjoy more humor and comedy: With the mod apk, you can enjoy more humor and comedy in the game. You can witness funny animations, dialogues, and sounds that will make you laugh. You can also see hilarious situations, reactions, and outcomes that will make you smile.
            • -
            • You can share your achievements and experiences: With the mod apk, you can share your achievements and experiences in the game with your friends and other players. You can take screenshots or videos of your gameplay and post them on social media or online platforms. You can also compare your scores and rankings with other players and challenge them to beat you.
            • -
            -

            Alternatives to Black Iron Stickman Rope Hero Mod APK

            -

            Other stickman games you can try

            -

            If you like Black Iron Stickman Rope Hero Mod APK, you may also like other stickman games that are similar or different from it. Here are some of them:

| Name | Description | URL |
| --- | --- | --- |
| Stickman Legends: Shadow War | A stickman action RPG game where you fight against dark forces using various weapons and skills. | [2](https://play.google.com/store/apps/details?id=com.zitga.ninja.stickman.legends.shadow.wars&hl=en_US&gl=US) |
| Stick War: Legacy | A stickman strategy game where you build an army of stickmen and conquer other territories. | [3](https://play.google.com/store/apps/details?id=com.maxgames.stickwarlegacy&hl=en_US&gl=US) |
| Draw a Stickman: EPIC 2 | A stickman adventure game where you draw your own stickman and explore a world of puzzles and secrets. | [4](https://play.google.com/store/apps/details?id=com.hitcents.drawastickmanepic2&hl=en_US&gl=US) |
| Stick Fight: The Game Mobile | A stickman multiplayer game where you fight against other players using physics-based weapons and maps. | [5](https://play.google.com/store/apps/details?id=com.netease.ddsfna&hl=en_US&gl=US) |

            The pros and cons of the alternatives

            -

            While these stickman games are also fun and enjoyable, they have some pros and cons that you should consider before playing them. Here are some of them:

            -


| Name | Pros | Cons |
| --- | --- | --- |
| Stickman Legends: Shadow War | Has a variety of characters, weapons, and skills to choose from; has a captivating storyline and graphics; has different modes and levels to play. | Requires an internet connection to play; has some bugs and glitches; has some in-app purchases and ads. |
| Stick War: Legacy | Has simple and intuitive gameplay; has a creative and humorous animation style; has a lot of content and updates to enjoy. | Can become repetitive and boring after a while; can be too easy or too hard depending on the difficulty level; can lag or crash on some devices. |
| Draw a Stickman: EPIC 2 | Has unique and interactive gameplay where you draw your own stickman and objects; has colorful and charming graphics and sound effects; has a lot of puzzles and secrets to discover. | Requires a lot of creativity and imagination to play; can be frustrating and confusing at times; can be expensive to unlock all the features. |
| Stick Fight: The Game Mobile | Has fun and chaotic gameplay where you fight against other players online; has physics-based mechanics and maps that add challenge and variety; has a lot of weapons and skins to customize your stickman. | Requires an internet connection and registration to play; can lag or disconnect on some servers; can be unfair or unbalanced depending on the players and weapons. |
            -

            Conclusion

            -

            Black Iron Stickman Rope Hero Mod APK is a game that you should try if you love stickman games, superhero games, or open-world games. It has a lot of features, benefits, challenges, and fun that will keep you entertained for hours. You can download and install it easily from the link we provided, and enjoy playing with unlimited money, weapons, and vehicles. You can also improve your skills and creativity, enjoy more humor and comedy, and share your achievements and experiences with your friends and other players. If you want to try other stickman games, you can also check out the alternatives we suggested, but be aware of their pros and cons. We hope you found this article helpful and informative. Thank you for reading!

            -

            FAQs

            -

            Here are some frequently asked questions about Black Iron Stickman Rope Hero Mod APK:

            -

            Q: Is Black Iron Stickman Rope Hero Mod APK safe to download and install?

            -

            A: Yes, Black Iron Stickman Rope Hero Mod APK is safe to download and install. It does not contain any viruses, malware, or spyware that can harm your device or data. However, you should always download it from a trusted source like the one we provided, and not from any unknown or suspicious websites.

            -

            Q: Is Black Iron Stickman Rope Hero Mod APK compatible with my device?

            -

A: Black Iron Stickman Rope Hero Mod APK is compatible with most Android devices running Android 4.1 or higher. However, some devices may not support the game due to their specifications or performance issues. You can check the compatibility of your device by going to the Google Play Store page of the original game [6](https://play.google.com/store/apps/details?id=com.mgc.stickman.rope.hero&hl=en_US&gl=US) and seeing whether it is available for your device.

            -

            Q: How can I update Black Iron Stickman Rope Hero Mod APK?

            -

            A: Black Iron Stickman Rope Hero Mod APK does not have an automatic update feature, so you need to manually update it whenever there is a new version available. You can do this by following the same steps as downloading and installing the mod apk, but make sure you delete the old version first before installing the new one. You can also check our website regularly for any updates or news about the mod apk.

            -

            Q: How can I contact the developer of Black Iron Stickman Rope Hero Mod APK?

            -

            A: Black Iron Stickman Rope Hero Mod APK is not developed by the original developer of the game, but by a third-party modder who modified the game to add more features and benefits. Therefore, we do not have any direct contact with the developer of the mod apk, and we cannot guarantee their support or response. However, you can try to contact them through their website [7](https://apkdone.com/stickman-rope-hero/) or their email address [8](mailto:apkdone@gmail.com) and see if they reply or not.

            -

            Q: Can I play Black Iron Stickman Rope Hero Mod APK offline?

            -

A: Yes, you can play Black Iron Stickman Rope Hero Mod APK offline without any internet connection. However, some features of the game may not work properly, or at all, when you are offline, such as saving your progress, accessing online content, or playing with other players. Therefore, we recommend playing the game online whenever possible to enjoy the full experience.

            -
            -
            \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Parking Multiplayer Son Srm Apk The Best Mobile Game for Parking Lovers by Webteknohaber.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Parking Multiplayer Son Srm Apk The Best Mobile Game for Parking Lovers by Webteknohaber.md deleted file mode 100644 index 5af75d18729f73e33ff98ffafc07272bcc4d60d3..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Parking Multiplayer Son Srm Apk The Best Mobile Game for Parking Lovers by Webteknohaber.md +++ /dev/null @@ -1,87 +0,0 @@ - -

            Car Parking Multiplayer Son Sürüm Apk Webteknohaber

            -

If you are a fan of realistic driving and parking games, you might have heard of Car Parking Multiplayer. It is one of the most popular mobile games in the genre, with over 100 million downloads on Google Play Store. In this article, we will tell you everything you need to know about Car Parking Multiplayer Son Sürüm Apk Webteknohaber: how to download and install it, what its features and benefits are, and how to enjoy it with other players online.

            -

            Introduction

            -

            Car Parking Multiplayer is a simulation game that lets you experience the thrill of driving and parking various cars in different scenarios. You can choose from over 150 cars, ranging from sedans and SUVs to sports cars and trucks. You can also explore over 80 maps, each with its own challenges and obstacles. You can drive in cities, deserts, airports, highways, and more.

            -



Download: https://ssurll.com/2uO0O1



            -

            But that's not all. Car Parking Multiplayer also offers a variety of challenges and tasks for players to complete in Career mode. These can range from simple parking tasks to more complex maneuvers such as parallel parking or parking in tight spaces. You can also earn money and XP by completing these tasks, which you can use to unlock new cars and maps.

            -

            Moreover, Car Parking Multiplayer is not just a single-player game. You can also play online with other players from around the world. You can join or create your own server, chat with other players, exchange cars, race with them, or just have fun together. You can also customize your car and garage with different colors, stickers, wheels, spoilers, and more.

            -

            How to download and install Car Parking Multiplayer Son Sürüm Apk?

            -

            If you want to enjoy all the features and benefits of Car Parking Multiplayer, you might want to download and install the latest version of the apk file. This is a modified version of the original game that gives you access to all the cars and maps without spending any money or watching any ads. You can also play online without any restrictions or limitations.

            -

            But how do you download and install Car Parking Multiplayer Son Sürüm Apk? Here are the steps you need to follow:

            -
              -
1. First, you need to find a reliable source for downloading the apk file. One of the best sources is Webteknohaber, a website that provides news and reviews about mobile games. Visit their website, search for Car Parking Multiplayer Son Sürüm Apk, and you will find a link to download the apk file there.
2. Second, you need to check the compatibility and security of the apk file before installing it. Look at the file size, version number, developer name, and the permissions the apk requests. You should also scan the apk file with an antivirus app to make sure it is free from malware or viruses (see the checksum sketch after this list for one more quick check).
3. Third, you need to enable unknown sources on your device, because Android does not allow installing apps from unknown sources by default. Go to Settings > Security > Unknown Sources, toggle it on, and confirm the action by tapping OK or Allow.
4. Fourth, locate the apk file on your device and tap on it to install it, granting any permissions the app requests by tapping Install or Accept. Once the installation is complete, you can launch the app and enjoy Car Parking Multiplayer Son Sürüm Apk.
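A quick way to verify a download, in addition to an antivirus scan, is to compare the file's checksum against one published by the source you trust. Below is a minimal Kotlin sketch of that check; the function name and the file path are only examples for illustration, not anything shipped with the game or required by it:

```kotlin
import java.io.File
import java.security.MessageDigest

// Compute the SHA-256 hash of a file so it can be compared
// against a checksum published by a trusted download source.
fun sha256Of(path: String): String {
    val digest = MessageDigest.getInstance("SHA-256")
    File(path).inputStream().use { input ->
        val buffer = ByteArray(8192)
        var read = input.read(buffer)
        while (read > 0) {
            digest.update(buffer, 0, read)
            read = input.read(buffer)
        }
    }
    // Render the digest bytes as lowercase hex
    return digest.digest().joinToString("") { "%02x".format(it) }
}

fun main() {
    // Example path only; point this at your actual download
    println(sha256Of("Download/car-parking-multiplayer.apk"))
}
```

If the hex string you get does not match the one the site publishes, the file was corrupted or tampered with and should not be installed.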
            -

            What are the benefits of using Car Parking Multiplayer Son Sürüm Apk?

            -

            By using Car Parking Multiplayer Son Sürüm Apk, you can enjoy many benefits that are not available in the original game. Here are some of them:

            -
              -
• You can access all the cars and maps in the game without spending any money or watching any ads. You can choose from over 150 cars and over 80 maps, each with its own features and challenges. You can also switch between cars and maps anytime you want.
• You can customize your car and garage with different colors, stickers, wheels, spoilers, and more. You can make your car look unique and stylish, and show it off to other players online. You can also decorate your garage with various items and accessories.
• You can play online with other players and chat with them using text or voice messages. You can join or create your own server, invite your friends, or meet new people from around the world. You can also exchange cars, race with them, or just have fun together.
            -

            Conclusion

            -

            Car Parking Multiplayer is a realistic and fun simulation game that lets you drive and park various cars in different scenarios. You can also play online with other players and customize your car and garage. If you want to enjoy all the features and benefits of the game, you should download and install Car Parking Multiplayer Son Sürüm Apk Webteknohaber. This is a modified version of the original game that gives you access to all the cars and maps without any restrictions or limitations. You can also play online without any problems or issues.

            -

            If you are interested in downloading and installing Car Parking Multiplayer Son Sürüm Apk Webteknohaber, you can follow the steps we have provided in this article. You can also visit Webteknohaber for more information and news about mobile games. We hope you enjoy playing Car Parking Multiplayer Son Sürüm Apk Webteknohaber as much as we do!

            -


            -

            FAQs

            -

            Here are some common questions and answers about Car Parking Multiplayer Son Sürüm Apk:

            -

            Is Car Parking Multiplayer Son Sürüm Apk safe to use?

            -

            Yes, Car Parking Multiplayer Son Sürüm Apk is safe to use as long as you download it from a reliable source like Webteknohaber. You should also check the compatibility and security of the apk file before installing it, and scan it with an antivirus app to make sure it is free from any malware or viruses.

            -

            Is Car Parking Multiplayer Son Sürüm Apk compatible with my device?

            -

            Car Parking Multiplayer Son Sürüm Apk is compatible with most Android devices that have Android 4.4 or higher. However, some devices might have different specifications or settings that might affect the performance or functionality of the game. You should also make sure that your device has enough storage space and battery life to run the game smoothly.

            -

            How do I update Car Parking Multiplayer Son Sürüm Apk?

            -

            To update Car Parking Multiplayer Son Sürüm Apk, you need to download and install the latest version of the apk file from Webteknohaber or another reliable source. You should also uninstall the previous version of the game before installing the new one, to avoid any conflicts or errors.

            -

            How do I uninstall Car Parking Multiplayer Son Sürüm Apk?

            -

            To uninstall Car Parking Multiplayer Son Sürüm Apk, you need to go to Settings > Apps > Car Parking Multiplayer > Uninstall and tap OK or Confirm. You should also delete the apk file from your device, to free up some storage space.

            -

            How do I contact the developer of Car Parking Multiplayer Son Sürüm Apk?

            -

If you have any questions, feedback, or suggestions about Car Parking Multiplayer Son Sürüm Apk, you can contact the developer of the game by emailing them at olzhass@gmail.com. You can also visit their website or their Facebook page for more information and updates about the game.

            -
            -
            \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download GB WhatsApp 2 Now and Get Access to Exclusive Features and Themes.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download GB WhatsApp 2 Now and Get Access to Exclusive Features and Themes.md deleted file mode 100644 index e2b51eadc64c94c58cf48eb0bd07fe96270172da..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download GB WhatsApp 2 Now and Get Access to Exclusive Features and Themes.md +++ /dev/null @@ -1,104 +0,0 @@ -
            -

            GB WhatsApp Download 2: How to Get the Latest Version of the Popular Modded App

            -

            If you are looking for a way to enhance your WhatsApp experience, you might have heard of GB WhatsApp, a modded version of the official app that offers many extra features and customization options. But did you know that there is a newer version of GB WhatsApp called GB WhatsApp 2? In this article, we will tell you what GB WhatsApp 2 is, why you should use it, how to download and install it, and how to update it to the latest version.

            -

            What is GB WhatsApp and Why You Should Use It

            -

            GB WhatsApp is a modified version of WhatsApp that allows you to do more things with the app, such as changing the theme, hiding your online status, sending larger files, using multiple accounts, and more. It is not an official app, but a third-party app developed by independent developers who want to provide more functionality and flexibility to WhatsApp users.

            -



Download: https://ssurll.com/2uNUZ4



            -

            GB WhatsApp 2 is an updated version of GB WhatsApp that has more features and improvements than the original one. Some of the new features of GB WhatsApp 2 are:

            -

            Features of GB WhatsApp 2

            -
              -
• You can send up to 90 images at once, instead of the usual 30.
• You can send videos up to 50 MB, instead of the usual 16 MB.
• You can send audio files up to 100 MB, instead of the usual 16 MB.
• You can set a status of up to 250 characters, instead of the usual 139.
• You can hide your blue ticks, second ticks, typing status, recording status, and online status from others.
• You can lock your chats with a password or fingerprint.
• You can customize the app with different themes, fonts, icons, and colors.
• You can use multiple accounts on the same device.
• You can back up and restore your chats without using Google Drive.
• You can schedule messages to be sent at a specific time.
            -

            As you can see, GB WhatsApp 2 offers many advantages over the official app. However, it also comes with some risks that you should be aware of before using it.

            -

            Risks of Using GB WhatsApp

            -
              -
• GB WhatsApp is not an official app, so it is not endorsed or supported by WhatsApp. This means that if you encounter any problems or issues with the app, you will not get any help from WhatsApp.
• GB WhatsApp may violate the terms and conditions of WhatsApp, which could result in your account being banned or suspended by WhatsApp. This could cause you to lose access to your chats and contacts.
• GB WhatsApp may contain malware or spyware that could harm your device or compromise your privacy. Since it is not available on the Google Play Store or any other official app store, you have to download it from unknown sources that may not be trustworthy or secure.
            -

            Therefore, if you decide to use GB WhatsApp, you should do so at your own risk and responsibility. You should also backup your chats regularly and use an antivirus app to protect your device from any potential threats.

            -

            How to Download and Install GB WhatsApp 2

            -

            If you are still interested in using GB WhatsApp 2, here are the steps that you need to follow to download and install it on your device:

            -


            -

            Step 1: Enable Unknown Sources on Your Device

            -

            Since GB WhatsApp 2 is not available on the Google Play Store, you need to enable the option to install apps from unknown sources on your device. To do this, go to your device settings and look for the security or privacy section. There, you will find an option to allow installation of apps from unknown sources. Turn it on and confirm your choice.
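For readers curious about what this toggle does under the hood: since Android 8.0 the permission is granted per app rather than globally. Here is a minimal Kotlin sketch, assuming an Android app context, of how an app could detect and request it; the helper name is illustrative and this is not code from GB WhatsApp itself:

```kotlin
import android.content.Context
import android.content.Intent
import android.net.Uri
import android.os.Build
import android.provider.Settings

// On Android 8.0+ (API 26), "install unknown apps" is a per-app
// permission. This helper opens the matching system settings screen
// when the calling app has not been granted it yet. The app must
// declare REQUEST_INSTALL_PACKAGES in its manifest for the check to
// return true, and this should be called with an Activity context
// so startActivity works without extra flags.
fun requestInstallPermissionIfNeeded(context: Context) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O &&
        !context.packageManager.canRequestPackageInstalls()
    ) {
        val intent = Intent(
            Settings.ACTION_MANAGE_UNKNOWN_APP_SOURCES,
            Uri.parse("package:${context.packageName}")
        )
        context.startActivity(intent)
    }
}
```

On older Android versions, the global Settings > Security > Unknown Sources switch described above is all there is.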

            -

            Step 2: Download GB WhatsApp 2 from a Trusted Source

            -

            Next, you need to download the GB WhatsApp 2 APK file from a trusted source. You can find the official website of GB WhatsApp 2 at https://gbwhatsapp.net/. There, you will see a download button that will direct you to the latest version of the app. Alternatively, you can use this direct link to download the GB WhatsApp 2 APK file: https://gbwhatsapp.net/download/GBWhatsApp2.apk. Make sure you have enough storage space on your device before downloading the file.

            -

            Step 3: Install GB WhatsApp 2 and Verify Your Number

            -

            Once you have downloaded the GB WhatsApp 2 APK file, you need to install it on your device. To do this, locate the file in your downloads folder and tap on it. You will see a prompt asking you to install the app. Tap on install and wait for the process to complete. After that, open the app and agree to the terms and conditions. Then, enter your phone number and verify it with a code that will be sent to you via SMS or call. You can also use your existing backup to restore your chats and contacts.

            -

            Step 4: Restore Your Chats and Enjoy the App

            -

            If you have used GB WhatsApp before, you can restore your chats from your previous backup. To do this, tap on restore when prompted and wait for the app to retrieve your data. If you have not used GB WhatsApp before, you can skip this step and start using the app as usual. You can now enjoy all the features and customization options that GB WhatsApp 2 offers.

            -

            How to Update GB WhatsApp 2 to the Latest Version

            -

            To keep your GB WhatsApp 2 app running smoothly and securely, you should update it regularly to the latest version. There are two ways to do this:

            -

            Method 1: Check for Updates within the App

            -

            The easiest way to update GB WhatsApp 2 is to check for updates within the app itself. To do this, open the app and tap on the three dots icon at the top right corner. Then, tap on settings and go to updates. There, you will see if there is a new version available for download. If there is, tap on download and install it as described in step 3 above.

            -

            Method 2: Download the Latest APK File from the Official Website

            -

            The other way to update GB WhatsApp 2 is to download the latest APK file from the official website as described in step 2 above. You can check the website regularly for new updates or subscribe to their newsletter to get notified when a new version is released. Then, download and install the new APK file as described in step 3 above.

            -

            Conclusion

            -

            GB WhatsApp 2 is a modded version of WhatsApp that offers many extra features and customization options that are not available in the official app. However, it also comes with some risks and drawbacks that you should be aware of before using it. If you decide to use GB WhatsApp 2, you should follow the steps above to download, install, and update it safely and easily.

            -

            FAQs

            -
              -
• Q: Is GB WhatsApp 2 legal?
  A: GB WhatsApp 2 is not an official app, so it is not authorized or approved by WhatsApp. Therefore, it may be considered illegal in some countries or regions where modded apps are prohibited.
• Q: Is GB WhatsApp 2 safe?
  A: GB WhatsApp 2 may contain malware or spyware that could harm your device or compromise your privacy. Therefore, it is not completely safe to use. You should only download it from trusted sources and use an antivirus app to protect your device.
• Q: Can I use GB WhatsApp 2 with my original WhatsApp account?
  A: Yes, you can use GB WhatsApp 2 with your original WhatsApp account by verifying your number with a code that will be sent to you via SMS or call. However, you should be careful not to use both apps at the same time or switch between them frequently, as this could trigger a ban or suspension from WhatsApp.
• Q: Can I use GB WhatsApp 2 with other modded apps?
  A: Yes, you can use GB WhatsApp 2 with other modded apps, such as FM WhatsApp, YO WhatsApp, or OG WhatsApp. However, you should make sure that each app has a different package name and phone number to avoid any conflicts or errors.
• Q: How can I contact the developers of GB WhatsApp 2?
  A: You can contact the developers of GB WhatsApp 2 by visiting their official website at https://gbwhatsapp.net/ and filling out the contact form. You can also follow them on their social media accounts to get the latest news and updates about the app.

            -
            -
            \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download My Talking Angela MOD APK and Enjoy Unlimited Coins and Diamonds.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download My Talking Angela MOD APK and Enjoy Unlimited Coins and Diamonds.md deleted file mode 100644 index 3f180d3d028f4a89bc2d63ab243933b416a1d4b1..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download My Talking Angela MOD APK and Enjoy Unlimited Coins and Diamonds.md +++ /dev/null @@ -1,81 +0,0 @@ - -

            My Talking Angela Mod APK: A Fun and Cute Game for Everyone

            -

            Do you love cats? Do you want to have a virtual pet that you can dress up, feed, play with, and take care of? If yes, then you should try My Talking Angela, a popular game from Outfit7 Limited that lets you adopt a cute kitten and raise her as your own. In this article, we will tell you everything you need to know about My Talking Angela, and how you can download and install My Talking Angela Mod APK, a modified version of the game that gives you unlimited money and diamonds, and unlocks all the items and outfits in the game.

            -



Download: https://ssurll.com/2uNTRP



            -

            What is My Talking Angela?

            -

            My Talking Angela is a casual simulation game that belongs to the Talking Tom and Friends franchise. The game was released in 2014 and has been downloaded over 500 million times on Google Play Store. The game is suitable for all ages, especially for kids who love animals and fashion.

            -

            Features of My Talking Angela

            -

            My Talking Angela has many features that make it fun and engaging to play. Here are some of them:

            -

            Customize Angela's appearance

            -

            You can change Angela's fur color, eye color, hairstyle, makeup, clothes, accessories, and more. You can also buy new items and outfits from the shop using coins or diamonds. You can make Angela look like a princess, a punk rocker, a superhero, or anything you want.

            -


            -

            Interact with Angela and her friends

            -

            You can talk to Angela and she will repeat what you say in a cute voice. You can also pet her, poke her, tickle her, or make her happy or angry. You can also visit her friends Tom, Hank, Ginger, Ben, and Becca, and see what they are up to.

            -

            Play mini-games and earn coins

            -

            You can play various mini-games with Angela, such as Happy Connect, Bubble Shooter, Brick Breaker, and more. You can earn coins by playing these games, which you can use to buy more items and outfits for Angela.

            -

            What is My Talking Angela Mod APK?

            -

            My Talking Angela Mod APK is a modified version of the original game that gives you some extra benefits that are not available in the official version. These benefits include:

            -

            Benefits of My Talking Angela Mod APK

            -

            Here are some of the benefits of My Talking Angela Mod APK:

            -

            Unlimited money and diamonds

            -

            You will get unlimited money and diamonds in My Talking Angela Mod APK, which means you can buy anything you want from the shop without worrying about running out of resources. You can also use diamonds to speed up the growth of Angela or unlock special items.

            -

            Unlocked all items and outfits

            -

            You will get access to all the items and outfits in the game without having to level up or pay for them. You can dress up Angela in any way you like without any restrictions.

            -

            No ads and no root required

            -

You will not see any ads in My Talking Angela Mod APK, which means you can enjoy the game without any interruptions or distractions. You also do not need to root your device to install My Talking Angela Mod APK, which means you do not have to risk damaging your device or voiding its warranty.

-

How to download and install My Talking Angela Mod APK?

-

            If you want to enjoy the benefits of My Talking Angela Mod APK, you need to download and install it on your device. The process is very simple and easy, and you can follow these steps:

            -

            Steps to download and install My Talking Angela Mod APK

            -

            Here are the steps to download and install My Talking Angela Mod APK:

            -

            Download the mod apk file from a trusted source

            -

            You need to download the mod apk file from a reliable and safe source, such as [this website]. You can click on the download button and wait for the file to be downloaded on your device. The file size is about 100 MB, so make sure you have enough space and a stable internet connection.

            -

            Enable unknown sources on your device

            -

            Before you can install the mod apk file, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on.

            -

            Install the mod apk file and enjoy the game

            -

            Once you have enabled unknown sources, you can go to your file manager and locate the mod apk file you downloaded. Tap on it and follow the instructions to install it. After the installation is complete, you can open the game and enjoy My Talking Angela Mod APK with unlimited money and diamonds, and unlocked all items and outfits.

            -

            Conclusion

            -

            My Talking Angela is a fun and cute game that lets you adopt a virtual kitten and raise her as your own. You can customize her appearance, interact with her and her friends, play mini-games, and earn coins. However, if you want to have more fun and freedom in the game, you should try My Talking Angela Mod APK, which gives you unlimited money and diamonds, unlocks all items and outfits, and removes ads. You can download and install My Talking Angela Mod APK easily by following the steps we provided in this article. We hope you enjoy playing My Talking Angela Mod APK as much as we do.

            -

            FAQs

            -

            Here are some frequently asked questions about My Talking Angela Mod APK:

            -
              -
1. Is My Talking Angela Mod APK safe to use?
   Yes, as long as you download it from a trusted source. We have tested the mod apk file ourselves and found no viruses or malware in it. However, you should always be careful when downloading any files from the internet and scan them with an antivirus before installing them.

2. Will I get banned for using My Talking Angela Mod APK?
   No, you will not get banned. The mod apk file does not interfere with the game's servers or online features, so there is no risk of getting detected or reported by other players. You can play without any worries.

3. Can I update My Talking Angela Mod APK?
   Not through the Google Play Store or any other official source. If you do so, you will lose all the benefits of the mod apk file and revert to the original version of the game. To update, download the latest version of the mod apk file from the same source you downloaded it from before.

4. Can I play My Talking Angela Mod APK offline?
   Yes, you can play offline without any internet connection. However, some features of the game may not work properly or may require an internet connection to function. For example, you may not be able to visit your friends or watch videos in exchange for coins or diamonds.

5. Can I play My Talking Angela Mod APK on PC?
   Yes, by using an Android emulator, which is software that allows you to run Android apps on your PC. Popular emulators include BlueStacks, Nox Player, and MEmu Player. Download one of these emulators on your PC, then download and install My Talking Angela Mod APK on it.

            -
            -
            \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Bubble Shooter Lite APK - A Classic Game with a Twist.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Bubble Shooter Lite APK - A Classic Game with a Twist.md deleted file mode 100644 index 8ab9821d82130a2ff531283f53e96d17cba59c2b..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Bubble Shooter Lite APK - A Classic Game with a Twist.md +++ /dev/null @@ -1,91 +0,0 @@ - -

            Bubble Shooter Lite APK: A Fun and Addictive Game for Android

            -

            If you are looking for a fun and addictive game to play on your Android device, you should try Bubble Shooter Lite APK. This is a classic arcade game with a twist, where you have to shoot and pop colorful bubbles to clear the board and advance to the next level. In this article, we will tell you what is Bubble Shooter Lite APK, why you should download it, and how to install it on your device.

            -



Download: https://ssurll.com/2uNUtP



            -

            What is Bubble Shooter Lite APK?

            -

            Bubble Shooter Lite APK is a mobile game developed by Block Puzzle Jewel Games, a popular developer of casual and puzzle games. The game is based on the original Bubble Shooter game, which was released in 1994 by Taito Corporation. The game has been updated and improved over the years, and now it has more than 100 million downloads on Google Play Store.

            -

            A classic arcade game with a twist

            -

            The game is simple but challenging. You have to aim and shoot bubbles of the same color to make them burst and disappear. The more bubbles you pop at once, the more points you get. You also have to avoid letting the bubbles reach the bottom of the screen, or else you will lose the game. The game has a twist, though. You can use special bubbles that have different effects, such as bombs, lightning, stars, and more. These bubbles can help you clear the board faster and score higher.

            -

            A simple and easy gameplay

            -

            The game is easy to play, but hard to master. You just need to tap on the screen to shoot the bubbles, and drag your finger to aim. You can also switch the bubble you are holding by tapping on it. The game has a smooth and responsive control system that makes it enjoyable to play. You can also adjust the sound and music settings according to your preference.

            -

            A variety of levels and challenges

            -

            The game has hundreds of levels that will keep you entertained for hours. Each level has a different layout, design, and difficulty. You will face different obstacles and challenges as you progress through the game, such as moving bubbles, frozen bubbles, metal bubbles, and more. You will also encounter different themes and backgrounds that will make the game more colorful and fun.

            -


            -

            Why should you download Bubble Shooter Lite APK?

            -

            There are many reasons why you should download Bubble Shooter Lite APK on your Android device. Here are some of them:

            -

            It's free and safe to download

            -

            The game is completely free to download and play. You don't need to pay anything to enjoy the game. You can also play it offline without an internet connection. The game is also safe to download, as it does not contain any viruses or malware that can harm your device.

            -

            It's compatible with most Android devices

            -

            The game is compatible with most Android devices that run on Android 4.4 or higher. You don't need a high-end device to play the game smoothly. The game has a small size of about 20 MB, so it won't take up much space on your device.

            -

            It's fun and relaxing to play

            -

The game is fun and relaxing to play, with soothing sound effects and music that will calm your nerves. It is also addictive, as you will want to beat your own high score and complete all the levels. It is a great way to kill time and have fun, and you can play it with your friends and family thanks to a multiplayer mode that lets you compete with other players online.

            -

            It's suitable for all ages and preferences

            -

            The game is suitable for all ages and preferences, as it has a cute and colorful design that will appeal to kids and adults alike. The game is also easy to learn and play, so anyone can enjoy it. The game has different modes and difficulties that will suit your skill level and mood. You can choose from classic, arcade, puzzle, and adventure modes, and from easy, medium, hard, and expert difficulties.

            -

            How to download and install Bubble Shooter Lite APK?

            -

            If you want to download and install Bubble Shooter Lite APK on your Android device, you need to follow these simple steps:

            -

            Download the APK file from a trusted source

            -

            The first step is to download the APK file from a trusted source. You can use the link below to download the latest version of the game from APKPure, a reliable website that offers safe and verified APK files. You can also scan the QR code below to download the file directly to your device.

            -

            Bubble Shooter Lite APK QR code

            -

            Enable unknown sources on your device settings

            -

            The next step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources. Turn on the switch or check the box to enable unknown sources. You may see a warning message, but don't worry, it's safe to proceed.

            -

            Tap on the APK file and follow the instructions

            -

            The final step is to tap on the APK file and follow the instructions. You can find the file in your downloads folder or in your notification bar. Tap on the file and you will see a screen that asks you to install the app. Tap on install and wait for the process to finish. You may see some permissions requests, but they are necessary for the app to function properly.

            -

            Enjoy the game and have fun

            -

            Once the installation is done, you can enjoy the game and have fun. You will see an icon of the game on your home screen or app drawer. Tap on it and start playing. You can also access the game from your browser by visiting this link. Have fun shooting and popping bubbles!

            -

            Conclusion

            -

            Bubble Shooter Lite APK is a fun and addictive game for Android that you should try. It's a classic arcade game with a twist, where you have to shoot and pop colorful bubbles to clear the board and advance to the next level. It's free and safe to download, compatible with most Android devices, fun and relaxing to play, suitable for all ages and preferences, and easy to install. Download it now and have a blast!

            -

            FAQs

            -

            Here are some frequently asked questions about Bubble Shooter Lite APK:

            -
              -
• What is the difference between Bubble Shooter Lite APK and Bubble Shooter?
  Bubble Shooter Lite APK is a modified version of Bubble Shooter that has more features and improvements, such as more levels, themes, modes, difficulties, special bubbles, a multiplayer mode, and an offline mode.
• Is Bubble Shooter Lite APK safe to download?
  Yes, Bubble Shooter Lite APK is safe to download, as long as you use a trusted source like APKPure. The file does not contain any viruses or malware that can harm your device.
• How can I update Bubble Shooter Lite APK?
  You can update Bubble Shooter Lite APK by downloading the latest version of the file from APKPure or by visiting this link. You don't need to uninstall the previous version of the game; just install the new one over it.
• How can I play Bubble Shooter Lite APK on PC?
  You can play Bubble Shooter Lite APK on PC by using an Android emulator like BlueStacks or NoxPlayer. These are programs that allow you to run Android apps on your PC. Download and install the emulator from its official website, then download and install Bubble Shooter Lite APK from APKPure. You can then launch the game from the emulator and play it on your PC.
• How can I contact the developer of Bubble Shooter Lite APK?
  You can contact the developer of Bubble Shooter Lite APK by sending an email to blockpuzzlejewelgames@gmail.com. You can also visit their Facebook page or their website for more information and support.

            -
            -
            \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Get Pixel Car Racer APK Mod and Enjoy the Ultimate Garage Experience.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Get Pixel Car Racer APK Mod and Enjoy the Ultimate Garage Experience.md deleted file mode 100644 index 92c550c051e987831970297ba932e859b2fc3a12..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Get Pixel Car Racer APK Mod and Enjoy the Ultimate Garage Experience.md +++ /dev/null @@ -1,98 +0,0 @@ -
            -

            Pixel Cars APK Mod: A Retro Racing Game with Unlimited Customization

            -

            If you are a fan of racing games and love to customize your own cars, you might want to check out Pixel Cars APK Mod. This is a modified version of the original Pixel Car Racer game, which is a retro-style arcade racing game with a sandbox RPG mode. In this article, we will tell you what Pixel Cars APK Mod is, what features it has, how to download and install it, and what are its pros and cons.

            -

            What is Pixel Cars APK Mod?

            -

            Pixel Cars APK Mod is a hacked version of the Pixel Car Racer game, which was developed by Studio Furukawa and released in 2016. The game is available for Android and iOS devices, and has been downloaded over 10 million times on Google Play Store. The game is inspired by the classic racing games of the 80s and 90s, such as Out Run, Gran Turismo, and Need for Speed. The game lets you build your dream garage with unlimited customization options for your cars. You can also race your cars on the streets or on the drag strip, and compete with other players online.

            -



Download: https://ssurll.com/2uNQHY



            -

            Features of Pixel Cars APK Mod

            -

            Pixel Cars APK Mod has many features that make it more fun and exciting than the original game. Some of these features are:

            -

            Unlimited money and diamonds

            -

            One of the main advantages of Pixel Cars APK Mod is that it gives you unlimited money and diamonds, which are the in-game currencies. You can use them to buy new cars, parts, upgrades, decals, and more. You don't have to worry about running out of money or diamonds, or spending real money to get them.

            -

            Free super cars and parts

            -

            Another benefit of Pixel Cars APK Mod is that it unlocks all the super cars and parts in the game for free. You can access over 1000 cars and 1000 parts from different brands and models, such as Ferrari, Lamborghini, Bugatti, Nissan, Toyota, Honda, BMW, Ford, Chevrolet, and more. You can also customize your cars with different colors, wheels, spoilers, exhausts, engines, transmissions, turbos, nitrous, etc.

            -

            Sandbox RPG mode

            -

            Pixel Cars APK Mod also lets you enjoy the sandbox RPG mode of the game, which is a unique feature that sets it apart from other racing games. In this mode, you can create your own character and story, and explore the open world of the game. You can also interact with other characters, join clubs, complete missions, collect items, and earn rewards.

            -

            Retro graphics and sound

            -

            If you are nostalgic for the old-school racing games, you will love the retro graphics and sound of Pixel Cars APK Mod. The game has pixelated graphics that resemble the 16-bit era of gaming. The game also has authentic sound effects and music that match the theme of the game.

            -

            How to download and install Pixel Cars APK Mod?

            -

            If you want to try Pixel Cars APK Mod on your Android device, you will need to follow these steps:

            -

            Step 1: Enable unknown sources

            -

            Before you can install any APK file on your device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

            -


            -

            Step 2: Download the APK file

            -

            Next, you need to download the Pixel Cars APK Mod file from a reliable source, such as [this one]. Make sure you download the latest version of the mod, which is 1.1.80 as of June 2023. The file size is about 67 MB, so make sure you have enough space on your device.

            -

            Step 3: Install the APK file

            -

            Once you have downloaded the APK file, you need to install it on your device. To do this, locate the file in your file manager and tap on it. You will see a pop-up window asking you to confirm the installation. Tap on Install and wait for the process to finish.

            -

            Step 4: Enjoy the game

            After the installation is complete, you can launch the game from your app drawer or home screen. You will see a Pixel Car Racer icon with a mod label on it. Tap on it and enjoy the game with unlimited money, diamonds, cars, parts, and more.

            Pros and cons of Pixel Cars APK Mod

            Pixel Cars APK Mod is not a perfect game, and it has its own pros and cons. Here are some of them:

            Pros

• It is free to download and play.
• It has unlimited money and diamonds, which let you buy and customize anything you want.
• It has free super cars and parts, which give you access to the best vehicles and upgrades in the game.
• It has a sandbox RPG mode, which lets you create your own story and explore the world.
• It has retro graphics and sound, which give it a nostalgic and authentic feel.

            Cons

• It may not be compatible with some devices or Android versions.
• It may have some bugs or glitches that affect the gameplay.
• It may not be updated regularly or in sync with the original game.
• It may be banned or blocked by the game developers or Google Play Store.
• It may be unsafe or harmful to your device or data if you download it from an untrusted source.

            Conclusion

Pixel Cars APK Mod is a fun and exciting racing game that lets you customize your own cars and race them on the streets or on the drag strip. It has many features that make it more enjoyable than the original game, such as unlimited money, diamonds, cars, parts, and more. However, it also has some drawbacks that you should be aware of, such as compatibility issues, bugs, irregular updates, bans, and security risks. If you want to try Pixel Cars APK Mod on your Android device, make sure you follow the steps we have provided above and download it from a reliable source. Have fun racing!

FAQs

Q: What is Pixel Car Racer?
A: Pixel Car Racer is a retro-style arcade racing game with a sandbox RPG mode. It was developed by Studio Furukawa and released in 2016.

Q: What is Pixel Cars APK Mod?
A: Pixel Cars APK Mod is a hacked version of Pixel Car Racer that gives you unlimited money, diamonds, cars, parts, and more.

Q: How do I download and install Pixel Cars APK Mod?
A: You need to enable unknown sources in your settings, download the APK file from a reliable source, install the APK file on your device, and launch the game.

Q: What are the pros and cons of Pixel Cars APK Mod?
A: The pros are that it is free to play, has unlimited customization options, has free super cars and parts, has a sandbox RPG mode, and has retro graphics and sound. The cons are that it may not be compatible with some devices or Android versions, may have some bugs or glitches, may not be updated regularly or in sync with the original game, may be banned or blocked by the game developers or Google Play Store, and may be unsafe or harmful to your device or data if you download it from an untrusted source.

Q: Is Pixel Cars APK Mod legal?
A: Pixel Cars APK Mod is not legal, as it violates the terms and conditions of the original game. It is also considered piracy and cheating by the game developers and Google Play Store. Use it at your own risk.

            \ No newline at end of file diff --git a/spaces/siya02/Konakni-TTS/ttsv/tts_infer/num_to_word_on_sent.py b/spaces/siya02/Konakni-TTS/ttsv/tts_infer/num_to_word_on_sent.py deleted file mode 100644 index de571c2be63fa467491d01daf0e2f38dada67de9..0000000000000000000000000000000000000000 --- a/spaces/siya02/Konakni-TTS/ttsv/tts_infer/num_to_word_on_sent.py +++ /dev/null @@ -1,1319 +0,0 @@ -import re -import string - -# ----------------------------- indic_num.py ----------------------------- -supported_lang = {"en", "hi", "gu", "mr", "bn", "te", "ta", "kn", "or", "pa"} -# supported_lang = {'eng', 'hin', 'guj', 'mar', 'ben', 'tel', 'tam', 'kan', 'ori', 'pan'} # Three alphabet lang code - -all_num = { - "en": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], - "hi": ["०", "१", "२", "३", "४", "५", "६", "७", "८", "९"], - "gu": ["૦", "૧", "૨", "૩", "૪", "૫", "૬", "૭", "૮", "૯"], - "mr": ["०", "१", "२", "३", "४", "५", "६", "७", "८", "९"], - "bn": ["০", "১", "২", "৩", "৪", "৫", "৬", "৭", "৮", "৯"], - "te": ["౦", "౧", "౨", "౩", "౪", "౫", "౬", "౭", "౮", "౯"], - "ta": ["0", "௧", "௨", "௩", "௪", "௫", "௬", "௭", "௮", "௯", "௰"], - "kn": ["೦", "೧", "೨", "೩", "೪", "೫", "೬", "೭", "೮", "೯"], - "or": ["୦", "୧", "୨", "୩", "୪", "୫", "୬", "୭", "୮", "୯"], - "pa": ["੦", "੧", "੨", "੩", "੪", "੫", "੬", "੭", "੮", "੯"], -} - -num_dict = dict() -num_dict["en"] = { - "0": "zero", - "1": "one", - "2": "two", - "3": "three", - "4": "four", - "5": "five", - "6": "six", - "7": "seven", - "8": "eight", - "9": "nine", - "10": "ten", - "11": "eleven", - "12": "twelve", - "13": "thirteen", - "14": "fourteen", - "15": "fifteen", - "16": "sixteen", - "17": "seventeen", - "18": "eighteen", - "19": "nineteen", - "20": "twenty", - "21": "twenty-one", - "22": "twenty-two", - "23": "twenty-three", - "24": "twenty-four", - "25": "twenty-five", - "26": "twenty-six", - "27": "twenty-seven", - "28": "twenty-eight", - "29": "twenty-nine", - "30": "thirty", - "31": "thirty-one", - "32": "thirty-two", - "33": "thirty-three", - "34": "thirty-four", - "35": "thirty-five", - "36": "thirty-six", - "37": "thirty-seven", - "38": "thirty-eight", - "39": "thirty-nine", - "40": "forty", - "41": "forty-one", - "42": "forty-two", - "43": "forty-three", - "44": "forty-four", - "45": "forty-five", - "46": "forty-six", - "47": "forty-seven", - "48": "forty-eight", - "49": "forty-nine", - "50": "fifty", - "51": "fifty-one", - "52": "fifty-two", - "53": "fifty-three", - "54": "fifty-four", - "55": "fifty-five", - "56": "fifty-six", - "57": "fifty-seven", - "58": "fifty-eight", - "59": "fifty-nine", - "60": "sixty", - "61": "sixty-one", - "62": "sixty-two", - "63": "sixty-three", - "64": "sixty-four", - "65": "sixty-five", - "66": "sixty-six", - "67": "sixty-seven", - "68": "sixty-eight", - "69": "sixty-nine", - "70": "seventy", - "71": "seventy-one", - "72": "seventy-two", - "73": "seventy-three", - "74": "seventy-four", - "75": "seventy-five", - "76": "seventy-six", - "77": "seventy-seven", - "78": "seventy-eight", - "79": "seventy-nine", - "80": "eighty", - "81": "eighty-one", - "82": "eighty-two", - "83": "eighty-three", - "84": "eighty-four", - "85": "eighty-five", - "86": "eighty-six", - "87": "eighty-seven", - "88": "eighty-eight", - "89": "eighty-nine", - "90": "ninety", - "91": "ninety-one", - "92": "ninety-two", - "93": "ninety-three", - "94": "ninety-four", - "95": "ninety-five", - "96": "ninety-six", - "97": "ninety-seven", - "98": "ninety-eight", - "99": "ninety-nine", - "100": "hundred", - "1000": "thousand", - "100000": "lac", - 
"10000000": "crore", - "1000000000": "arab", -} # English-India -num_dict["hi"] = { - "0": "शून्य", - "1": "एक", - "2": "दो", - "3": "तीन", - "4": "चार", - "5": "पाँच", - "6": "छः", - "7": "सात", - "8": "आठ", - "9": "नौ", - "10": "दस", - "11": "ग्यारह", - "12": "बारह", - "13": "तेरह", - "14": "चौदह", - "15": "पंद्रह", - "16": "सोलह", - "17": "सत्रह", - "18": "अट्ठारह", - "19": "उन्नीस", - "20": "बीस", - "21": "इक्कीस", - "22": "बाईस", - "23": "तेईस", - "24": "चौबिस", - "25": "पच्चीस", - "26": "छब्बीस", - "27": "सत्ताईस", - "28": "अट्ठाईस", - "29": "उनतीस", - "30": "तीस", - "31": "इकतीस", - "32": "बत्तीस", - "33": "तैंतीस", - "34": "चौंतीस", - "35": "पैंतीस", - "36": "छत्तीस", - "37": "सैंतीस", - "38": "अड़तीस", - "39": "उनतालीस", - "40": "चालीस", - "41": "इकतालीस", - "42": "बयालीस", - "43": "तैंतालीस", - "44": "चौंतालीस", - "45": "पैंतालीस", - "46": "छियालीस", - "47": "सैंतालीस", - "48": "अड़तालीस", - "49": "उनचास", - "50": "पचास", - "51": "इक्यावन​", - "52": "बावन", - "53": "तिरेपन", - "54": "चौवन", - "55": "पचपन", - "56": "छप्पन", - "57": "सत्तावन", - "58": "अट्ठावन", - "59": "उनसठ", - "60": "साठ", - "61": "इकसठ", - "62": "बासठ", - "63": "तिरेसठ", - "64": "चौंसठ", - "65": "पैंसठ", - "66": "छयासठ", - "67": "सरसठ​", - "68": "अड़सठ", - "69": "उनहत्तर", - "70": "सत्तर", - "71": "इकहत्तर", - "72": "बहत्तर", - "73": "तिहत्तर", - "74": "चौहत्तर", - "75": "पचहत्तर", - "76": "छिहत्तर", - "77": "सतहत्तर", - "78": "अठहत्तर", - "79": "उन्यासी", - "80": "अस्सी", - "81": "इक्यासी", - "82": "बयासी", - "83": "तिरासी", - "84": "चौरासी", - "85": "पचासी", - "86": "छियासी", - "87": "सत्तासी", - "88": "अठासी", - "89": "नवासी", - "90": "नब्बे", - "91": "इक्यानवे", - "92": "बानवे", - "93": "तिरानवे", - "94": "चौरानवे", - "95": "पचानवे", - "96": "छियानवे", - "97": "सत्तानवे", - "98": "अट्ठानवे", - "99": "निन्यानवे", - "100": "सौ", - "1000": "हज़ार", - "100000": "लाख", - "10000000": "करोड़", - "1000000000": "अरब", -} # Hindi -num_dict["gu"] = { - "0": "શૂન્ય", - "1": "એક", - "2": "બે", - "3": "ત્રણ", - "4": "ચાર", - "5": "પાંચ", - "6": "છ", - "7": "સાત", - "8": "આઠ", - "9": "નવ", - "10": "દસ", - "11": "અગિયાર", - "12": "બાર", - "13": "તેર", - "14": "ચૌદ", - "15": "પંદર", - "16": "સોળ", - "17": "સત્તર", - "18": "અઢાર", - "19": "ઓગણિસ", - "20": "વીસ", - "21": "એકવીસ", - "22": "બાવીસ", - "23": "તેવીસ", - "24": "ચોવીસ", - "25": "પચ્ચીસ", - "26": "છવીસ", - "27": "સત્તાવીસ", - "28": "અઠ્ઠાવીસ", - "29": "ઓગણત્રીસ", - "30": "ત્રીસ", - "31": "એકત્રીસ", - "32": "બત્રીસ", - "33": "તેત્રીસ", - "34": "ચોત્રીસ", - "35": "પાંત્રીસ", - "36": "છત્રીસ", - "37": "સડત્રીસ", - "38": "અડત્રીસ", - "39": "ઓગણચાલીસ", - "40": "ચાલીસ", - "41": "એકતાલીસ", - "42": "બેતાલીસ", - "43": "ત્રેતાલીસ", - "44": "ચુંમાલીસ", - "45": "પિસ્તાલીસ", - "46": "છેતાલીસ", - "47": "સુડતાલીસ", - "48": "અડતાલીસ", - "49": "ઓગણપચાસ", - "50": "પચાસ", - "51": "એકાવન", - "52": "બાવન", - "53": "ત્રેપન", - "54": "ચોપન", - "55": "પંચાવન", - "56": "છપ્પન", - "57": "સત્તાવન", - "58": "અઠ્ઠાવન", - "59": "ઓગણસાઠ", - "60": "સાઈઠ", - "61": "એકસઠ", - "62": "બાસઠ", - "63": "ત્રેસઠ", - "64": "ચોસઠ", - "65": "પાંસઠ", - "66": "છાસઠ", - "67": "સડસઠ", - "68": "અડસઠ", - "69": "અગણોસિત્તેર", - "70": "સિત્તેર", - "71": "એકોતેર", - "72": "બોતેર", - "73": "તોતેર", - "74": "ચુમોતેર", - "75": "પંચોતેર", - "76": "છોતેર", - "77": "સિત્યોતેર", - "78": "ઇઠ્યોતેર", - "79": "ઓગણાએંસી", - "80": "એંસી", - "81": "એક્યાસી", - "82": "બ્યાસી", - "83": "ત્યાસી", - "84": "ચોર્યાસી", - "85": "પંચાસી", - "86": "છ્યાસી", - "87": "સિત્યાસી", - "88": "ઈઠ્યાસી", - "89": "નેવ્યાસી", - "90": "નેવું", - "91": 
"એકાણું", - "92": "બાણું", - "93": "ત્રાણું", - "94": "ચોરાણું", - "95": "પંચાણું", - "96": "છન્નું", - "97": "સત્તાણું", - "98": "અઠ્ઠાણું", - "99": "નવ્વાણું", - "100": "સો", - "1000": "હજાર", - "100000": "લાખ", - "1000000": "દસ લાખ", - "10000000": "કરોડ઼", -} # Gujarati -num_dict["mr"] = { - "0": "शून्य", - "1": "एक", - "2": "दोन", - "3": "तीन", - "4": "चार", - "5": "पाच", - "6": "सहा", - "7": "सात", - "8": "आठ", - "9": "नऊ", - "10": "दहा", - "11": "अकरा", - "12": "बारा", - "13": "तेरा", - "14": "चौदा", - "15": "पंधरा", - "16": "सोळा", - "17": "सतरा", - "18": "अठरा", - "19": "एकोणीस", - "20": "वीस", - "21": "एकवीस", - "22": "बावीस", - "23": "तेवीस", - "24": "चोवीस", - "25": "पंचवीस", - "26": "सव्वीस", - "27": "सत्तावीस", - "28": "अठ्ठावीस", - "29": "एकोणतीस", - "30": "तीस", - "31": "एकतीस", - "32": "बत्तीस", - "33": "तेहेतीस", - "34": "चौतीस", - "35": "पस्तीस", - "36": "छत्तीस", - "37": "सदतीस", - "38": "अडतीस", - "39": "एकोणचाळीस", - "40": "चाळीस", - "41": "एक्केचाळीस", - "42": "बेचाळीस", - "43": "त्रेचाळीस", - "44": "चव्वेचाळीस", - "45": "पंचेचाळीस", - "46": "सेहेचाळीस", - "47": "सत्तेचाळीस", - "48": "अठ्ठेचाळीस", - "49": "एकोणपन्नास", - "50": "पन्नास", - "51": "एक्कावन्न", - "52": "बावन्न", - "53": "त्रेपन्न", - "54": "चोपन्न", - "55": "पंचावन्न", - "56": "छप्पन्न", - "57": "सत्तावन्न", - "58": "अठ्ठावन्न", - "59": "एकोणसाठ", - "60": "साठ", - "61": "एकसष्ठ", - "62": "बासष्ठ", - "63": "त्रेसष्ठ", - "64": "चौसष्ठ", - "65": "पासष्ठ", - "66": "सहासष्ठ", - "67": "सदुसष्ठ", - "68": "अडुसष्ठ", - "69": "एकोणसत्तर", - "70": "सत्तर", - "71": "एक्काहत्तर", - "72": "बाहत्तर", - "73": "त्र्याहत्तर", - "74": "चौर्‍याहत्तर", - "75": "पंच्याहत्तर", - "76": "शहात्तर", - "77": "सत्याहत्तर", - "78": "अठ्ठ्याहत्तर", - "79": "एकोण ऐंशी", - "80": "ऐंशी", - "81": "एक्क्याऐंशी", - "82": "ब्याऐंशी", - "83": "त्र्याऐंशी", - "84": "चौऱ्याऐंशी", - "85": "पंच्याऐंशी", - "86": "शहाऐंशी", - "87": "सत्त्याऐंशी", - "88": "अठ्ठ्याऐंशी", - "89": "एकोणनव्वद", - "90": "नव्वद", - "91": "एक्क्याण्णव", - "92": "ब्याण्णव", - "93": "त्र्याण्णव", - "94": "चौऱ्याण्णव", - "95": "पंच्याण्णव", - "96": "शहाण्णव", - "97": "सत्त्याण्णव", - "98": "अठ्ठ्याण्णव", - "99": "नव्व्याण्णव", - "100": "शे", - "1000": "हजार", - "100000": "लाख", - "10000000": "कोटी", - "1000000000": "अब्ज", -} # Marathi -num_dict["bn"] = { - "0": "শূন্য", - "1": "এক", - "2": "দুই", - "3": "তিন", - "4": "চার", - "5": "পাঁচ", - "6": "ছয়", - "7": "সাত", - "8": "আট", - "9": "নয়", - "10": "দশ", - "11": "এগার", - "12": "বার", - "13": "তের", - "14": "চৌদ্দ", - "15": "পনের", - "16": "ষোল", - "17": "সতের", - "18": "আঠার", - "19": "ঊনিশ", - "20": "বিশ", - "21": "একুশ", - "22": "বাইশ", - "23": "তেইশ", - "24": "চব্বিশ", - "25": "পঁচিশ", - "26": "ছাব্বিশ", - "27": "সাতাশ", - "28": "আঠাশ", - "29": "ঊনত্রিশ", - "30": "ত্রিশ", - "31": "একত্রিশ", - "32": "বত্রিশ", - "33": "তেত্রিশ", - "34": "চৌত্রিশ", - "35": "পঁয়ত্রিশ", - "36": "ছত্রিশ", - "37": "সাঁইত্রিশ", - "38": "আটত্রিশ", - "39": "ঊনচল্লিশ", - "40": "চল্লিশ", - "41": "একচল্লিশ", - "42": "বিয়াল্লিশ", - "43": "তেতাল্লিশ", - "44": "চুয়াল্লিশ", - "45": "পঁয়তাল্লিশ", - "46": "ছেচল্লিশ", - "47": "সাতচল্লিশ", - "48": "আটচল্লিশ", - "49": "ঊনপঞ্চাশ", - "50": "পঞ্চাশ", - "51": "একান্ন", - "52": "বায়ান্ন", - "53": "তিপ্পান্ন", - "54": "চুয়ান্ন", - "55": "পঞ্চান্ন", - "56": "ছাপ্পান্ন", - "57": "সাতান্ন", - "58": "আটান্ন", - "59": "ঊনষাট", - "60": "ষাট", - "61": "একষট্টি", - "62": "বাষট্টি", - "63": "তেষট্টি", - "64": "চৌষট্টি", - "65": "পঁয়ষট্টি", - "66": "ছেষট্টি", - "67": "সাতষট্টি", - "68": "আটষট্টি", - "69": "ঊনসত্তর", 
- "70": "সত্তর", - "71": "একাত্তর", - "72": "বাহাত্তর", - "73": "তিয়াত্তর", - "74": "চুয়াত্তর", - "75": "পঁচাত্তর", - "76": "ছিয়াত্তর", - "77": "সাতাত্তর", - "78": "আটাত্তর", - "79": "ঊনআশি", - "80": "আশি", - "81": "একাশি", - "82": "বিরাশি", - "83": "তিরাশি", - "84": "চুরাশি", - "85": "পঁচাশি", - "86": "ছিয়াশি", - "87": "সাতাশি", - "88": "আটাশি", - "89": "ঊননব্বই", - "90": "নব্বই", - "91": "একানব্বই", - "92": "বিরানব্বই", - "93": "তিরানব্বই", - "94": "চুরানব্বই", - "95": "পঁচানব্বই", - "96": "ছিয়ানব্বই", - "97": "সাতানব্বই", - "98": "আটানব্বই", - "99": "নিরানব্বই", - "100": "শো", - "1000": "হাজার", - "100000": "লাখ", - "10000000": "কোটি", - "1000000000": "একশ’ কোটি", -} # Bengali -num_dict["te"] = { - "0": "సున్నా", - "1": "ఒకటి", - "2": "రెండు", - "3": "మూడు", - "4": "నాలుగు", - "5": "ఐదు", - "6": "ఆరు", - "7": "ఏడు", - "8": "ఎనిమిది", - "9": "తొమ్మిది", - "10": "పది", - "11": "పదకొండు", - "12": "పన్నెండు", - "13": "పదమూడు", - "14": "పద్నాలుగు", - "15": "పదిహేను", - "16": "పదహారు", - "17": "పదిహేడు", - "18": "పద్దెనిమిది", - "19": "పందొమ్మిది", - "20": "ఇరవై", - "21": "ఇరవై ఒకటి", - "22": "ఇరవై రెండు", - "23": "ఇరవై మూడు", - "24": "ఇరవై నాలుగు", - "25": "ఇరవై ఐదు", - "26": "ఇరవై ఆరు", - "27": "ఇరవై ఏడు", - "28": "ఇరవై ఎనిమిది", - "29": "ఇరవై తొమ్మిది", - "30": "ముప్పై", - "31": "ముప్పై ఒకటి", - "32": "ముప్పై రెండు", - "33": "ముప్పై మూడు", - "34": "ముప్పై నాలుగు", - "35": "ముప్పై ఐదు", - "36": "ముప్పై ఆరు", - "37": "ముప్పై ఏడు", - "38": "ముప్పై ఎనిమిది", - "39": "ముప్పై తొమ్మిది", - "40": "నలభై", - "41": "నలభై ఒకటి", - "42": "నలభై రెండు", - "43": "నలభై మూడు", - "44": "నలభై నాలుగు", - "45": "నలభై ఐదు", - "46": "నలభై ఆరు", - "47": "నలభై ఏడు", - "48": "నలభై ఎనిమిది", - "49": "నలభై తొమ్మిది", - "50": "యాభై", - "51": "యాభై ఒకటి", - "52": "యాభై రెండు", - "53": "యాభై మూడు", - "54": "యాభై నాలుగు", - "55": "యాభై ఐదు", - "56": "యాభై ఆరు", - "57": "యాభై ఏడు", - "58": "యాభై ఎనిమిది", - "59": "యాభై తొమ్మిది", - "60": "అరవై", - "61": "అరవై ఒకటి", - "62": "అరవై రెండు", - "63": "అరవై మూడు", - "64": "అరవై నాలుగు", - "65": "అరవై ఐదు", - "66": "అరవై ఆరు", - "67": "అరవై ఏడు", - "68": "అరవై ఎనిమిది", - "69": "అరవై తొమ్మిది", - "70": "డెబ్బై", - "71": "డెబ్బై ఒకటి", - "72": "డెబ్బై రెండు", - "73": "డెబ్బై మూడు", - "74": "డెబ్బై నాలుగు", - "75": "డెబ్బై ఐదు", - "76": "డెబ్బై ఆరు", - "77": "డెబ్బై ఏడు", - "78": "డెబ్బై ఎనిమిది", - "79": "డెబ్బై తొమ్మిది", - "80": "ఎనభై", - "81": "ఎనభై ఒకటి", - "82": "ఎనభై రెండు", - "83": "ఎనభై మూడు", - "84": "ఎనభై నాలుగు", - "85": "ఎనభై ఐదు", - "86": "ఎనభై ఆరు", - "87": "ఎనభై ఏడు", - "88": "ఎనభై ఎనిమిది", - "89": "ఎనభై తొమ్మిది", - "90": "తొంభై", - "91": "తొంభై ఒకటి", - "92": "తొంభై రెండు", - "93": "తొంభై మూడు", - "94": "తొంభై నాలుగు", - "95": "తొంభై ఐదు", - "96": "తొంభై ఆరు", - "97": "తొంభై ఏడు", - "98": "తొంభై ఎనిమిది", - "99": "తొంభై తొమ్మిది", - "100": "వందల", - "1000": "వేల", - "100000": "లక్షల", - "10000000": "కోట్ల", - "1000000000": "బిలియన్", -} # Telugu -num_dict["ta"] = { - "0": "பூஜ்ஜியம்", - "1": "ஒன்று", - "2": "இரண்டு", - "3": "மூன்று", - "4": "நான்கு", - "5": "ஐந்து", - "6": "ஆறு", - "7": "ஏழு", - "8": "எட்டு", - "9": "ஒன்பது", - "10": "பத்து", - "11": "பதினொன்று", - "12": "பன்னிரண்டு", - "13": "பதிமூன்று", - "14": "பதினான்கு", - "15": "பதினைந்து", - "16": "பதினாறு", - "17": "பதினேழு", - "18": "பதினெட்டு", - "19": "பத்தொன்பது", - "20": "இருபது", - "21": "இருபது ஒன்று", - "22": "இருபத்து இரண்டு", - "23": "இருபத்து மூன்று", - "24": "இருபத்து நான்கு", - "25": "இருபத்து ஐந்து", - "26": "இருபத்து ஆறு", - "27": "இருபத்து ஏழு", - "28": "இருபத்து எட்டு", - "29": 
"இருபத்து ஒன்பது", - "30": "முப்பது", - "31": "முப்பத்து ஒன்று", - "32": "முப்பத்து இரண்டு", - "33": "முப்பத்து மூன்று", - "34": "முப்பத்து நான்கு", - "35": "முப்பத்து ஐந்து", - "36": "முப்பத்து ஆறு", - "37": "முப்பத்து ஏழு", - "38": "முப்பத்து எட்டு", - "39": "முப்பத்து ஒன்பது", - "40": "நாற்பது", - "41": "நாற்பத்து ஒன்று", - "42": "நாற்பத்து இரண்டு", - "43": "நாற்பத்து மூன்று", - "44": "நாற்பத்து நான்கு", - "45": "நாற்பத்து ஐந்து", - "46": "நாற்பத்து ஆறு", - "47": " நாற்பத்து ஏழு", - "48": "நாற்பத்து எட்டு", - "49": "நாற்பத்து ஒன்பது", - "50": "ஐம்பது", - "51": "ஐம்பத்து ஒன்று", - "52": "ஐம்பத்து இரண்டு", - "53": "ஐம்பத்து மூன்று", - "54": "ஐம்பத்து நான்கு", - "55": "ஐம்பத்து ஐந்து", - "56": "ஐம்பத்து ஆறு", - "57": "ஐம்பத்து ஏழு", - "58": "ஐம்பத்து எட்டு", - "59": "ஐம்பத்து ஒன்பது", - "60": "அறுபது", - "61": "அறுபத்து ஒன்று", - "62": "அறுபத்து இரண்டு", - "63": "அறுபத்து மூன்று", - "64": "அறுபத்து நான்கு", - "65": "அறுபத்து ஐந்து", - "66": "அறுபத்து ஆறு", - "67": "அறுபத்து ஏழு", - "68": "அறுபத்து எட்டு", - "69": "அறுபத்து ஒன்பது", - "70": "எழுபது", - "71": "எழுபத்தி ஒன்று", - "72": "எழுபத்தி இரண்டு", - "73": "எழுபத்தி முச்சக்கர", - "74": "எழுபத்தி நான்கு", - "75": "எழுபத்தி ஐந்து", - "76": "எழுபத்தி ஆறு", - "77": "எழுபத்தி ஏழு", - "78": "எழுபத்தி எட்டு", - "79": "எழுபத்தி ஒன்பது", - "80": "எண்பது", - "81": "எண்பத்தியொன்று", - "82": "எண்பத்திரண்டு", - "83": "எண்பத்திமூன்று", - "84": "என்பதினான்கு", - "85": "என்பதினைந்து", - "86": "எண்பத்திஆறு", - "87": "எண்பத்திஏழு", - "88": "எண்பத்தியெட்டு", - "89": "எண்பத்தியொன்பது", - "90": "தொன்னூறு", - "91": "தொண்ணூற்றியொன்று", - "92": "தொண்ணூற்றிரண்டு", - "93": "தொண்ணூற்றிமூன்று", - "94": "தொண்ணூற்றிநான்கு", - "95": "தொண்ணூற்றிஐந்து", - "96": "தொண்ணூற்றியாறு", - "97": "தொண்ணூற்றியேழு", - "98": "தொண்ணூற்றியெட்டு", - "99": "தொண்ணூற்றிஒன்பது", - "100": "நூறு", - "1000": "ஆயிரம்", - "100000": "இலட்சம்", - "10000000": "கோடி", - "1000000000": "பில்லியன்", -} # Tamil -num_dict["kn"] = { - "0": "ಸೊನ್ನೆ", - "1": "ಒಂದು", - "2": "ಎರಡು", - "3": "ಮೂರು", - "4": "ನಾಲ್ಕು", - "5": "ಅಯ್ದು", - "6": "ಆರು", - "7": "ಏಳು", - "8": "ಎಂಟು", - "9": "ಒಂಬತ್ತು", - "10": "ಹತ್ತು", - "11": "ಹನ್ನೊಂದು", - "12": "ಹನ್ನೆರಡು", - "13": "ಹದಿಮೂರು", - "14": "ಹದಿನಾಲ್ಕು", - "15": "ಹದಿನೈದು", - "16": "ಹದಿನಾರು", - "17": "ಹದಿನೇಳು", - "18": "ಹದಿನೆಂಟು", - "19": "ಹತ್ತೊಂಬತ್ತು", - "20": "ಇಪ್ಪತ್ತು", - "21": "ಇಪ್ಪತ್ತ್’ಒಂದು", - "22": "ಇಪ್ಪತ್ತ್’ಎರಡು", - "23": "ಇಪ್ಪತ್ತ್’ಮೂರು", - "24": "ಇಪ್ಪತ್ತ್’ನಾಲ್ಕು", - "25": "ಇಪ್ಪತ್ತ್’ಐದು", - "26": "ಇಪ್ಪತ್ತ್’ಆರು", - "27": "ಇಪ್ಪತ್ತ್’ಏಳು", - "28": "ಇಪ್ಪತ್ತ್’ಎಂಟು", - "29": "ಇಪ್ಪತ್ತ್’ಒಂಬತ್ತು", - "30": "ಮೂವತ್ತು", - "31": "ಮುವತ್ತ್’ಒಂದು", - "32": "ಮುವತ್ತ್’ಎರಡು", - "33": "ಮುವತ್ತ್’ಮೂರು", - "34": "ಮೂವತ್ತ್’ನಾಲ್ಕು", - "35": "ಮೂವತ್ತ್’ಐದು", - "36": "ಮೂವತ್ತ್’ಆರು", - "37": "ಮೂವತ್ತ್’ಏಳು", - "38": "ಮೂವತ್ತ್’ಎಂಟು", - "39": "ಮೂವತ್ತ್’ಒಂಬತ್ತು", - "40": "ನಲವತ್ತು", - "41": "ನಲವತ್ತೊಂದು", - "42": "ನಲವತ್ತ್ ಎರಡು", - "43": "ನಲವತ್ತ್ ಮೂರು", - "44": "ನಲವತ್ತ್ ನಾಲ್ಕು", - "45": "ನಲವತ್ತೈದು", - "46": "ನಲವತ್ತಾರು", - "47": "ನಲವತ್ತೇಳು", - "48": "ನಲವತ್ತೆಂಟು", - "49": "ನಲವತ್ತೊಂಬತ್ತು", - "50": "ಐವತ್ತು", - "51": "ಐವತ್ತೊಂದು", - "52": "ಐವತ್ತೆರಡು", - "53": "ಐವತ್ತಮೂರು", - "54": "ಐವತ್ತ್ನಾಲ್ಕು", - "55": "ಐವತ್ತೈದು", - "56": "ಐವತ್ತಾರು", - "57": "ಐವತ್ತೇಳು", - "58": "ಐವತ್ತೆಂಟು", - "59": "ಐವತ್ತೊಂಬತ್ತು", - "60": "ಅರವತ್ತು", - "61": "ಅರವತ್ತೊಂದು", - "62": "ಅರವತ್ತೆರಡು", - "63": "ಅರವತ್ತ್ ಮೂರು", - "64": "ಅರವತ್ತ್ ನಾಲ್ಕು", - "65": "ಅರವತ್ತೈದು", - "66": "ಅರವತ್ತಾರು", - "67": "ಅರವತ್ತೇಳು", - "68": "ಅರವತ್ತೆಂಟು", - "69": "ಅರವತ್ತೊಂಬತ್ತು", - "70": "ಎಪ್ಪತ್ತು", - "71": "ಎಪ್ಪತ್ತೊಂದು", - "72": "ಎಪ್ಪತ್ತೆರಡು", - "73": "ಎಪ್ಪತ್ತ್ ಮೂರು", - "74": 
"ಎಪ್ಪತ್ತ್ ನಾಲ್ಕು", - "75": "ಎಪ್ಪತ್ತೈದು", - "76": "ಎಪ್ಪತ್ತಾರು", - "77": "ಎಪ್ಪತ್ತೇಳು", - "78": "ಎಪ್ಪತ್ತೆಂಟು", - "79": "ಎಪ್ಪತ್ತೊಂಬತ್ತು", - "80": "ಎಂಬತ್ತು", - "81": "ಎಂಬತ್ತೊಂದು", - "82": "ಎಂಬತ್ತೆರಡು", - "83": "ಎಂಬತ್ತ್ ಮೂರು", - "84": "ಎಂಬತ್ತ್ ನಾಲ್ಕು", - "85": "ಎಂಬತ್ತೈದು", - "86": "ಎಂಬತ್ತಾರು", - "87": "ಎಂಬತ್ತೇಳು", - "88": "ಎಂಬತ್ತೆಂಟು", - "89": "ಎಂಬತ್ತೊಂಬತ್ತು", - "90": "ತೊಂಬತ್ತು", - "91": "ತೊಂಬತ್ತೊಂದು", - "92": "ತೊಂಬತ್ತೆರಡು", - "93": "ತೊಂಬತ್ತ ಮೂರು", - "94": "ತೊಂಬತ್ತ ನಾಲ್ಕು", - "95": "ತೊಂಬತ್ತೈದು", - "96": "ತೊಂಬತ್ತಾರು", - "97": "ತೊಂಬತ್ತೇಳು", - "98": "ತೊಂಬತ್ತೆಂಟು", - "99": "ತೊಂಬತ್ತೊಂಬತ್ತು", - "100": "ನೂರ", - "1000": "ಸಾವಿರದ", - "100000": "ಲಕ್ಷದ", - "10000000": "ಕೋಟಿ", - "1000000000": "ಶತಕೋಟಿ", -} # Kannada -num_dict["or"] = { - "0": "ଶୁନ୍ୟ", - "1": "ଏକ", - "2": "ଦୁଇ", - "3": "ତିନି", - "4": "ଚାରି", - "5": "ପାଞ୍ଚ", - "6": "ଛଅ", - "7": "ସାତ", - "8": "ଆଠ", - "9": "ନଅ", - "10": "ନଅ", - "11": "ଏଗାର", - "12": "ବାର", - "13": "ତେର", - "14": "ଚଉଦ", - "15": "ପନ୍ଦର", - "16": "ଷୋହଳ", - "17": "ସତର", - "18": "ଅଠର", - "19": "ଊଣାଇଶ", - "20": "କୋଡିଏ", - "21": "ଏକୋଇଶି", - "22": "ବାଇଶି", - "23": "ତେଇଶି", - "24": "ଚବିଶି", - "25": "ପଚିଶି", - "26": "ଛବିଶି", - "27": "ସତାଇଶି", - "28": "ଅଠାଇଶି", - "29": "ଅଣତିରିଶି", - "30": "ତିରିଶି", - "31": "ଏକତିରିଶି", - "32": "ବତିଶି", - "33": "ତେତିଶି", - "34": "ଚଉତିରିଶି", - "35": "ପଞ୍ଚତିରିଶି", - "36": "ଛତିଶି", - "37": "ସଂଇତିରିଶି", - "38": "ଅଠତିରିଶି", - "39": "ଅଣଚାଳିଶି", - "40": "ଚାଳିଶି", - "41": "ଏକଚାଳିଶି", - "42": "ବୟାଳିଶି", - "43": "ତେୟାଳିଶି", - "44": "ଚଉରାଳିଶି", - "45": "ପଞ୍ଚଚାଳିଶି", - "46": "ଛୟାଳିଶି", - "47": "ସତଚାଳିଶି", - "48": "ଅଠଚାଳିଶି", - "49": "ଅଣଚାଶ", - "50": "ପଚାଶ", - "51": "ଏକାବନ", - "52": "ବାଉନ", - "53": "ତେପନ", - "54": "ଚଉବନ", - "55": "ପଞ୍ଚାବନ", - "56": "ଛପନ", - "57": "ସତାବନ", - "58": "ଅଠାବନ", - "59": "ଅଣଷଠି", - "60": "ଷାଠିଏ", - "61": "ଏକଷଠି", - "62": "ବାଷଠି", - "63": "ତେଷଠି", - "64": "ଚଉଷଠି", - "65": "ପଞ୍ଚଷଠି", - "66": "ଛଅଷଠି", - "67": "ସତଷଠି", - "68": "ଅଠଷଠି", - "69": "ଅଣସ୍ତରୀ", - "70": "ସତୂରୀ", - "71": "ଏକସ୍ତରୀ", - "72": "ବାସ୍ତରୀ", - "73": "ତେସ୍ତରୀ", - "74": "ଚଉସ୍ତରୀ", - "75": "ପଞ୍ଚସ୍ତରୀ", - "76": "ଛଅସ୍ତରୀ", - "77": "ସତସ୍ତରୀ", - "78": "ଅଠସ୍ତରୀ", - "79": "ଅଣାଅଶୀ", - "80": "ଅଶୀ", - "81": "ଏକାଅଶୀ", - "82": "ବୟାଅଶୀ", - "83": "ତେୟାଅଶୀ", - "84": "ଚଉରାଅଶୀ", - "85": "ପଞ୍ଚାଅଶୀ", - "86": "ଛୟାଅଶୀ", - "87": "ସତାଅଶୀ", - "88": "ଅଠାଅଶୀ", - "89": "ଅଣାନବେ", - "90": "ନବେ", - "91": "ଏକାନବେ", - "92": "ବୟାନବେ", - "93": "ତେୟାନବେ", - "94": "ଚଉରାନବେ", - "95": "ପଞ୍ଚାନବେ", - "96": "ଛୟାନବେ", - "97": "ସତାନବେ", - "98": "ଅଠାନବେ", - "99": "ଅନେଶତ", - "100": "ଶହେ", - "1000": "ହଜାର", - "100000": "ଲକ୍ଷ", - "10000000": "କୋଟି", - "1000000000": "କୋଟି", -} # Oriya -num_dict["pa"] = { - "0": "ਸਿਫਰ ", - "1": "ਇੱਕ", - "2": "ਦੋ", - "3": "ਤਿੰਨ", - "4": "ਚਾਰ", - "5": "ਪੰਜ", - "6": "ਛੇ", - "7": "ਸੱਤ", - "8": "ਅੱਠ", - "9": "ਨੌਂ", - "10": "ਦੱਸ", - "11": "ਗਿਆਰਾਂ", - "12": "ਬਾਰਾਂ", - "13": "ਤੇਰਾਂ", - "14": "ਚੌਦਾਂ", - "15": "ਪੰਦਰਾਂ", - "16": "ਸੋਲ਼ਾਂ", - "17": "ਸਤਾਰਾਂ", - "18": "ਅਠਾਰਾਂ", - "19": "ਉਨੀ", - "20": "ਵੀਹ", - "21": "ਇੱਕੀ", - "22": "ਬਾਈ", - "23": "ਤੇਈ", - "24": "ਚੌਵੀ", - "25": "ਪੰਝੀ", - "26": "ਛੱਬੀ", - "27": "ਸਤਾਈ", - "28": "ਅਠਾਈ", - "29": "ਉਨੱਤੀ", - "30": "ਤੀਹ", - "31": "ਇਕੱਤੀ", - "32": "ਬੱਤੀ", - "33": "ਤੇਤੀ", - "34": "ਚੌਂਤੀ", - "35": "ਪੈਂਤੀ", - "36": "ਛੱਤੀ", - "37": "ਸੈਂਤੀ", - "38": "ਅਠੱਤੀ", - "39": "ਉਨਤਾਲੀ", - "40": "ਚਾਲੀ", - "41": "ਇਕਤਾਲੀ", - "42": "ਬਤਾਲੀ", - "43": "ਤਰਤਾਲੀ", - "44": "ਚੌਤਾਲੀ", - "45": "ਪੰਜਤਾਲੀ", - "46": "ਛਿਆਲੀ", - "47": "ਸੰਤਾਲੀ", - "48": "ਅੱਠਤਾਲੀ", - "49": "ਉਣਿੰਜਾ", - "50": "ਪੰਜਾਹ", - "51": "ਇਕਵਿੰਜਾ", - "52": "ਬਵਿੰਜਾ", - "53": "ਤਰਵਿੰਜਾ", - "54": "ਚਰਿੰਜਾ", - "55": "ਪਚਵਿੰਜਾ", - "56": "ਛਪਿੰਜਾ", - 
"57": "ਸਤਵਿੰਜਾ", - "58": "ਅੱਠਵਿੰਜਾ", - "59": "ਉਣਾਠ", - "60": "ਸੱਠ", - "61": "ਇਕਾਠ", - "62": "ਬਾਠ੍ਹ", - "63": "ਤਰੇਠ੍ਹ", - "64": "ਚੌਠ੍ਹ", - "65": "ਪੈਂਠ", - "66": "ਛਿਆਠ", - "67": "ਸਤਾਹਠ", - "68": "ਅੱਠਾਠ", - "69": "ਉਣੱਤਰ", - "70": "ਸੱਤਰ", - "71": "ਇਕ੍ਹੱਤਰ", - "72": "ਬਹੱਤਰ", - "73": "ਤਹੱਤਰ", - "74": "ਚੌਹੱਤਰ", - "75": "ਪੰਜੱਤਰ", - "76": "ਛਿਹੱਤਰ", - "77": "ਸਤੱਤਰ", - "78": "ਅਠੱਤਰ", - "79": "ਉਣਾਸੀ", - "80": "ਅੱਸੀ", - "81": "ਇਕਾਸੀ", - "82": "ਬਿਆਸੀ", - "83": "ਤਰਾਸੀ", - "84": "ਚਰਾਸੀ", - "85": "ਪੰਜਾਸੀ", - "86": "ਛਿਆਸੀ", - "87": "ਸਤਾਸੀ", - "88": "ਅਠਾਸੀ", - "89": "ਉਣਾਨਵੇਂ", - "90": "ਨੱਬੇ", - "91": "ਇਕਾਨਵੇਂ", - "92": "ਬਿਆਨਵੇਂ", - "93": "ਤਰਾਨਵੇਂ", - "94": "ਚਰਾਨਵੇਂ", - "95": "ਪਚਾਨਵੇਂ", - "96": "ਛਿਆਨਵੇਂ", - "97": "ਸਤਾਨਵੇਂ", - "98": "ਅਠਾਨਵੇਂ", - "99": "ਨਿੜਾਨਵੇਂ", - "100": "ਸੌ", - "1000": "ਹਜਾਰ", - "100000": "ਲੱਖ", - "10000000": "ਕਰੋੜ", - "1000000000": "ਅਰਬ", -} # Punjabi - -# --------------------------- num_to_word.py ------------------------------ -""" -Method to convert Numbers to Words -for indian languages - -Use cases:- -1) Speech recognition pre-processing -2) Language modeling Data pre-processing - -------------------------- -check indic_numbers.py to add support -for any indian language -""" - - -def language_specific_exception(words, lang, combiner): - """ - Language Specific Exception will come here - """ - - def occurs_at_end(piece): - return words[-len(piece) :] == piece - - if lang == "mr": - words = words.replace("एक" + combiner + "शे", "शंभर") - elif lang == "gu": - words = words.replace("બે" + combiner + "સો", "બસ્સો") - elif lang == "te": - exception_dict = { - "1": "ఒక", - "100": "వంద", - "100+": "వందలు", - "1000": "వెయ్యి", - "1000+": "వేలు", - "100000": "లక్ష", - "100000+": "లక్షలు", - "10000000": "కోటి", - "10000000+": "కోట్లు", - } - - test_case = ["100", "1000", "100000", "10000000"] - for test in test_case: - test_word = num_dict["te"][test] - match = num_dict["te"]["1"] + combiner + test_word - # for numbers like : 100, 1000, 100000 - if words == match: - return exception_dict[test] - # for numbers like : 200, 4000, 800000 - elif occurs_at_end(test_word): - words = words.replace(test_word, exception_dict[test + "+"]) - # for numbers like : 105, 1076, 123993 - elif not occurs_at_end(match): - replacement = exception_dict["1"] + combiner + exception_dict[test] - words = words.replace(match, replacement) - - # Exception case for 101...199 - special_case = "ఒక" + combiner + "వంద" - words = words.replace(special_case, "నూట") - elif lang == "kn": - # special case for 100 - if words == ("ಒಂದು" + combiner + "ನೂರ"): - return "ನೂರು" - exception_dict = { - "ನೂರ": "ನೂರು", - "ಸಾವಿರದ": "ಸಾವಿರ", - "ಲಕ್ಷದ": "ಲಕ್ಷ", - "ಕೋಟಿಯ": "ಕೋಟಿ", - } - for expt in exception_dict: - if occurs_at_end(expt): - words = words.replace(expt, exception_dict[expt]) - return words - - -def num_to_word(num, lang, separator=", ", combiner=" "): - """ - Main Method - :param num: Number digits from any indian language - :param lang: Language Code from supported Language - :param separator: Separator character i.e. separator = '-' --> 'two hundred-sixty' - :param combiner: combine number with position i.e. 
combiner = '-' --> 'two-hundred sixty' - :return: UTF-8 String of numbers in words - """ - lang = lang.lower() - num = str(num) - - # Load dictionary according to language code - assert lang in supported_lang, "Language not supported" - num_dic = num_dict[lang] - - # dash default combiner for english-india - if (lang == "en") & (combiner == " "): - combiner = "-" - - # Remove punctuations from numbers - num = str(num).replace(",", "").replace(" ", "") - - # return word as it is if not number - if not num.isdecimal(): - return num - - # Replace native language numbers with english digits - for language in supported_lang: - for num_index in range(10): - num = num.replace(all_num[language][num_index], all_num["en"][num_index]) - - # Assert that input contains only integer number - for digit in num: - assert digit in all_num["en"], "Give proper input" - - # Process - # For Number longer than 9 digits - def all_two_digit(digits_2): - if len(digits_2) <= 1: # Provided only one/zero digit - return num_dic.get(digits_2, "") - elif digits_2 == "00": # Two Zero provided - return num_dic["0"] + separator + num_dic["0"] - elif digits_2[0] == "0": # First digit is zero - return num_dic["0"] + separator + num_dic[digits_2[1]] - else: # Both digit provided - return num_dic[digits_2] - - # For Number less than 9 digits - def two_digit(digits_2): - digits_2 = digits_2.lstrip("0") - if len(digits_2) != 0: - return num_dic[digits_2] - else: - return "" - - def all_digit(digits): - digits = digits.lstrip("0") - digit_len = len(digits) - if digit_len > 3: - num_of_digits_to_process = (digit_len % 2) + 1 - process_digits = digits[:num_of_digits_to_process] - base = str(10 ** (int(digit_len / 2) * 2 - 1)) - remain_digits = digits[num_of_digits_to_process:] - return ( - num_dic[process_digits] - + combiner - + num_dic[base] - + separator - + all_digit(remain_digits) - ) - elif len(digits) == 3: - return ( - num_dic[digits[:1]] - + combiner - + num_dic["100"] - + separator - + two_digit(digits[1:]) - ) - else: - return two_digit(digits) - - num = num.lstrip("0") - full_digit_len = len(num) - - if full_digit_len == 0: - output = num_dic["0"] - elif full_digit_len <= 9: - output = all_digit(num) - else: - iteration = round(full_digit_len / 2) - output = all_two_digit(num[:2]) # First to digit - for i in range(1, iteration): - output = ( - output + separator + all_two_digit(num[i * 2 : (i + 1) * 2]) - ) # Next two digit pairs - remaining_digits = num[iteration * 2 :] - if not all_two_digit(remaining_digits) == "": - output = ( - output + separator + all_two_digit(remaining_digits) - ) # remaining Last one/two digits - - output = output.strip(separator) - - output = language_specific_exception(output, lang, combiner) - - return output - - -# --------------------------------- num_to_word_on_a_sent --------------------------------- - - -def is_digit(word, digit_pattern): - return re.search(digit_pattern, word) - - -def remove_punct(sent): - clean = re.sub("[%s]" % re.escape(string.punctuation), " ", sent) - return " ".join([word for word in clean.split() if word]) - - -def normalize_nums(text, lang): - """ - text: str (eg) - lang: lang code ['en', 'hi'] - - returns: str - (eg) - """ - - if lang in supported_lang: - text = text.replace('-',' - ') # space separate hyphen - words = text.split() - lang_digits = [str(i) for i in range(0, 10)] - - digit_pattern = "[" + "".join(lang_digits) + "]" - num_indices = [ - ind for ind, word in enumerate(words) if is_digit(word, digit_pattern) - ] - - words_up = [ - num_to_word(word, 
lang, separator=" ", combiner=" ") - if ind in num_indices - else word - for ind, word in enumerate(words) - ] - return " ".join(words_up) - else: - return text - - -if __name__ == "__main__": - print(normalize_nums("रीटा के पास 16 बिल्लियाँ हैं।", "hi")) diff --git a/spaces/skf15963/summary/fengshen/examples/pretrain_t5/convert_ckpt_to_bin.py b/spaces/skf15963/summary/fengshen/examples/pretrain_t5/convert_ckpt_to_bin.py deleted file mode 100644 index 2aeef8c860864d138b0c970baca72a568bf51a19..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/pretrain_t5/convert_ckpt_to_bin.py +++ /dev/null @@ -1,37 +0,0 @@ -import time -from builtins import print -import argparse - -import torch -# os.environ["CUDA_VISIBLE_DEVICES"] = '3' - - -def get_time_str(): - return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - - -def main(): - total_parser = argparse.ArgumentParser("Pretrain Unsupervise.") - total_parser.add_argument('--ckpt_path', default=None, type=str) - total_parser.add_argument('--bin_path', default=None, type=str) - total_parser.add_argument('--rm_prefix', default=None, type=str) - # * Args for base model - args = total_parser.parse_args() - print('Argument parse success.') - state_dict = torch.load(args.ckpt_path)['module'] - new_state_dict = {} - - if args.rm_prefix is not None: - prefix_len = len(args.rm_prefix) - for k, v in state_dict.items(): - if k[:prefix_len] == args.rm_prefix: - new_state_dict[k[prefix_len:]] = v - else: - new_state_dict[k] = v - else: - new_state_dict = state_dict - torch.save(new_state_dict, args.bin_path) - - -if __name__ == '__main__': - main() diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/dynamicconv_layer/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/dynamicconv_layer/__init__.py deleted file mode 100644 index 22dc6f403d2a0ecdb1b9e7e69ed96bd560e93b2c..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/dynamicconv_layer/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .dynamicconv_layer import DynamicconvLayer # noqa diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq_cli/score.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq_cli/score.py deleted file mode 100644 index 0b207be959d55f6a56d8c5eb7db3dbe0c1ac977e..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq_cli/score.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -BLEU scoring of generated translations against reference translations. -""" - -import argparse -import os -import sys - -from fairseq.data import dictionary -from fairseq.scoring import bleu - - -def get_parser(): - parser = argparse.ArgumentParser( - description="Command-line script for BLEU scoring." 
- ) - # fmt: off - parser.add_argument('-s', '--sys', default='-', help='system output') - parser.add_argument('-r', '--ref', required=True, help='references') - parser.add_argument('-o', '--order', default=4, metavar='N', - type=int, help='consider ngrams up to this order') - parser.add_argument('--ignore-case', action='store_true', - help='case-insensitive scoring') - parser.add_argument('--sacrebleu', action='store_true', - help='score with sacrebleu') - parser.add_argument('--sentence-bleu', action='store_true', - help='report sentence-level BLEUs (i.e., with +1 smoothing)') - # fmt: on - return parser - - -def cli_main(): - parser = get_parser() - args = parser.parse_args() - print(args) - - assert args.sys == "-" or os.path.exists( - args.sys - ), "System output file {} does not exist".format(args.sys) - assert os.path.exists(args.ref), "Reference file {} does not exist".format(args.ref) - - dict = dictionary.Dictionary() - - def readlines(fd): - for line in fd.readlines(): - if args.ignore_case: - yield line.lower() - else: - yield line - - if args.sacrebleu: - import sacrebleu - - def score(fdsys): - with open(args.ref) as fdref: - print(sacrebleu.corpus_bleu(fdsys, [fdref]).format()) - - elif args.sentence_bleu: - - def score(fdsys): - with open(args.ref) as fdref: - scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk()) - for i, (sys_tok, ref_tok) in enumerate( - zip(readlines(fdsys), readlines(fdref)) - ): - scorer.reset(one_init=True) - sys_tok = dict.encode_line(sys_tok) - ref_tok = dict.encode_line(ref_tok) - scorer.add(ref_tok, sys_tok) - print(i, scorer.result_string(args.order)) - - else: - - def score(fdsys): - with open(args.ref) as fdref: - scorer = bleu.Scorer( - bleu.BleuConfig( - pad=dict.pad(), - eos=dict.eos(), - unk=dict.unk(), - ) - ) - for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)): - sys_tok = dict.encode_line(sys_tok) - ref_tok = dict.encode_line(ref_tok) - scorer.add(ref_tok, sys_tok) - print(scorer.result_string(args.order)) - - if args.sys == "-": - score(sys.stdin) - else: - with open(args.sys, "r") as f: - score(f) - - -if __name__ == "__main__": - cli_main() diff --git a/spaces/stamps-labs/stamp2vec/embedding_models/vits8/oml/example.py b/spaces/stamps-labs/stamp2vec/embedding_models/vits8/oml/example.py deleted file mode 100644 index 034bcd77068e8be1f82e7a6abe524336f1981176..0000000000000000000000000000000000000000 --- a/spaces/stamps-labs/stamp2vec/embedding_models/vits8/oml/example.py +++ /dev/null @@ -1,12 +0,0 @@ -from PIL import Image -from model_oml import EmbeddingModelOML - -def get_embeddings(img_path: str): - model = EmbeddingModelOML() - image = Image.open(img_path) - embeddings = model(image=image) - return embeddings - - -if __name__ == "__main__": - print(get_embeddings("data/test/images/99d_15.bmp")) \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Bengali Film Bojhena Se Bojhena Full Movie 11 LINK.md b/spaces/stomexserde/gpt4-ui/Examples/Bengali Film Bojhena Se Bojhena Full Movie 11 LINK.md deleted file mode 100644 index 619e4c507f6b2cbd1d9960443d81cb799886914c..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Bengali Film Bojhena Se Bojhena Full Movie 11 LINK.md +++ /dev/null @@ -1,17 +0,0 @@ - -

            Bojhena Se Bojhena: A Bengali Romantic Drama with a Twist

            Bojhena Se Bojhena is a 2012 Bengali film directed by Raj Chakraborty and starring Soham Chakraborty, Mimi Chakraborty, Abir Chatterjee and Payel Sarkar. The film revolves around two couples who board a bus from Kolkata to Siliguri, unaware of the fate that awaits them. A tragic accident changes their lives forever and reveals their true identities and feelings.

            Bengali Film Bojhena Se Bojhena Full Movie 11


            Download Zip 🔗 https://urlgoal.com/2uIabD



            The film was a remake of the 2010 Telugu film Ala Modalaindi and was a critical and commercial success. It won several awards, including the Filmfare Award for Best Film (Bengali) and Best Director (Bengali). The film also spawned a television series of the same name that aired on Star Jalsha from 2013 to 2016.

If you are looking for a romantic drama with a twist, you can watch Bojhena Se Bojhena online on Hotstar, Hoichoi or Hungama Play. You can also download the film from the Internet Archive for free. Enjoy this heartwarming story of love, loss and destiny.

            The film has four main characters: Avik (Soham Chakraborty), a photographer who falls in love with Riya (Payel Sarkar), a flight attendant; Ananya (Mimi Chakraborty), a journalist who is engaged to Joy (Abir Chatterjee), a businessman. Avik and Ananya meet at a wedding and become friends, while Riya and Joy are childhood sweethearts. However, things get complicated when Avik and Ananya realize that they have feelings for each other, while Riya and Joy are unhappy in their relationship.

            The film explores the themes of friendship, love, betrayal, sacrifice and destiny. It also has several humorous and emotional moments that keep the audience engaged. The film has a nonlinear narrative that switches between the past and the present, revealing the backstory of the characters and their connections. The film also has a surprise twist at the end that shocks the viewers and leaves them with a lot of questions.

            Bojhena Se Bojhena is a film that will make you laugh, cry and think. It is a film that will touch your heart and stay with you for a long time. If you are a fan of Bengali cinema or romantic dramas, you should not miss this film.

            The film has a stellar cast that delivers impressive performances. Soham Chakraborty and Mimi Chakraborty have a great chemistry and portray their characters with sincerity and charm. Abir Chatterjee and Payel Sarkar also do justice to their roles and show their versatility as actors. The supporting cast, including Arijit Dutta, Biswajit Chakraborty and Tulika Basu, also add value to the film.

            The film also has a melodious soundtrack composed by Arindam Chatterjee and Indradeep Dasgupta. The songs are sung by popular singers like Arijit Singh, Prashmita Paul, Ash King and Shreya Ghoshal. The songs are well-written and suit the mood and theme of the film. Some of the songs, like "Bojhena Se Bojhena", "Na Re Na" and "Sajna", have become hit numbers and are still popular among the fans.

The film also has good cinematography by Soumik Haldar and crisp editing by Rabiranjan Maitra. It has a smooth flow and a brisk pace that keep the audience hooked, and some beautiful locations that add to its visual appeal. Made on a budget of 2.5 crore rupees, it collected 15 crore rupees at the box office, making it one of the highest-grossing Bengali films of 2012.

            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download Naruto Shippuden The Movie Sub Indo.md b/spaces/stomexserde/gpt4-ui/Examples/Download Naruto Shippuden The Movie Sub Indo.md deleted file mode 100644 index a43b001bf66a4f882bff818b7f4407870764383f..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Download Naruto Shippuden The Movie Sub Indo.md +++ /dev/null @@ -1,21 +0,0 @@ -

            How to Download Naruto Shippuden the Movie Sub Indo

            If you are a fan of Naruto Shippuden, you might want to watch the movie series that follows the adventures of Naruto and his friends as they face various enemies and challenges. There are 10 movies in total, 3 from Naruto's childhood and 7 from his teenage years. But where can you download them with Indonesian subtitles?

            download naruto shippuden the movie sub indo


Download Zip: https://urlgoal.com/2uI8Ql



            In this article, we will show you some of the best websites to download Naruto Shippuden the Movie Sub Indo for free. You can choose from different resolutions, formats and quality depending on your preferences and device. All you need is a stable internet connection and some storage space.

            Here are some of the websites that we recommend:

• BatchKun: This website offers a complete collection of Naruto Shippuden the Movie BD Sub Indo from 1 to 10. You can download them in 360p, 480p or 720p with Google Drive links. The website also provides a brief synopsis of each movie for your reference.
• AdikFilm: This website also has a complete collection of Naruto Shippuden the Movie Sub Indo from 1 to 7. You can download them in 360p, 480p, 720p or 1080p with Google Drive links. The website also has a trailer for The Last - Naruto the Movie.
• SUBDL: This website is for those who already have the movie files and just need the Indonesian subtitles. You can download the subtitles for Naruto Shippuden the Movie 1 in BluRay quality with just one click.
• Bilibili: This website is for those who prefer to stream the movie online rather than download it. You can watch Naruto Shippuden the Movie 1 Sub Indo in HD quality on this website for free.

            We hope this article helps you enjoy Naruto Shippuden the Movie Sub Indo on your device. Happy watching!

            But why should you watch Naruto Shippuden the Movie Sub Indo? What makes these movies worth your time and attention? Well, if you are a fan of the Naruto manga or anime series, you will find these movies to be a great extension of the story and the characters. You will see Naruto and his friends grow stronger, face new enemies, learn new skills, and overcome their personal struggles. You will also see some amazing action scenes that showcase the creativity and diversity of the ninja world.

            However, even if you are not familiar with the Naruto franchise, you can still enjoy these movies as standalone stories. Each movie has its own plot, setting, and theme that can appeal to different audiences. For example, Naruto Shippuden the Movie 1 is about destiny and free will, Naruto Shippuden the Movie 2 is about bonds and loyalty, Naruto Shippuden the Movie 3 is about inheritance and legacy, and so on. You can also appreciate the animation quality, the voice acting, the music, and the humor that these movies offer.

            Of course, these movies are not perfect. Some of them have flaws in their logic, pacing, or character development. Some of them are too predictable, too clichéd, or too repetitive. Some of them are not very faithful to the original source material or contradict some of the established facts. Some of them are just plain boring or disappointing. But that's why you should watch them with an open mind and a critical eye. You can form your own opinions and preferences about these movies and share them with other fans.

            In conclusion, Naruto Shippuden the Movie Sub Indo is a series of anime movies that can entertain and inspire you. Whether you are a die-hard fan or a curious newcomer, you can find something to enjoy in these movies. You can download them from various websites or stream them online for free. Just remember to respect the creators and support their work legally if possible.

            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Fruits Basket (2019) 15 Vostfr.md b/spaces/stomexserde/gpt4-ui/Examples/Fruits Basket (2019) 15 Vostfr.md deleted file mode 100644 index f51054795b45cb2276a41cfca3cec2f3a747cd8c..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Fruits Basket (2019) 15 Vostfr.md +++ /dev/null @@ -1,14 +0,0 @@ -

Fruits Basket (2019) 15 vostfr: an episode rich in emotion

Fruits Basket is an anime adapted from the manga of the same name by Natsuki Takaya, which tells the story of Tohru Honda, a high school girl who finds herself living with members of the Soma clan, who transform into animals of the Chinese zodiac when they come into contact with the opposite sex. The series got a remake in 2019, which follows the manga faithfully and is streamed on Wakanim and Crunchyroll.

            Fruits Basket (2019) 15 vostfr


            Download Zip ===> https://urlgoal.com/2uIbZm



In episode 15 of Fruits Basket (2019) 15 vostfr, titled "I'm Not Going to Lose", Tohru accompanies Yuki and Kyo to the dojo of Kazuma, the latter's master. There she meets Rin, Kyo's cousin and former girlfriend, who makes a jealous scene. Meanwhile, Kazuma decides to provoke Kyo by removing his bracelet, which keeps his true form in check. Kyo then transforms into a hideous, terrifying monster that embodies the curse of the cat. Tohru, horrified, runs away.

This episode is one of the most dramatic and intense of the series, shedding light on Kyo's tragic past and his complicated relationship with Rin. It also shows Tohru's determination to accept Kyo as he is, despite her fear and disgust. The episode is carried by polished animation and a moving soundtrack, which reinforce the impact of the key scenes. Fans of the manga will not be disappointed by this faithful and successful adaptation.

If you want to discover or revisit this landmark episode of Fruits Basket (2019) 15 vostfr, you can stream it legally on Wakanim or Crunchyroll. You can also buy the complete cour on Wakanim to enjoy the series without interruption. Fruits Basket is an anime not to be missed for fans of comedy, drama, romance and fantasy.

Fruits Basket (2019) 15 vostfr is an episode that advances the main plot of the series, which revolves around the zodiac curse and its consequences for the characters. We learn more about the pasts of Kyo and Rin, who have both suffered from the cruelty of the other members of the Soma clan. We also discover the role of Kazuma, who took Kyo under his wing and gave him a magic bracelet to contain his cursed form. Kazuma is a complex and ambiguous character, who loves Kyo like a son but also tests him to make him grow.

Fruits Basket (2019) 15 vostfr is also an episode that explores the feelings of Tohru and Kyo, who grow closer and closer as the episodes go by. Tohru is an endearing and courageous heroine, who tries to understand and help the Somas despite their secrets. She is the first person to see Kyo's true form and not reject him. Kyo is a tormented and solitary hero, who hides his suffering behind his impulsive and quick-tempered nature. He is deeply scarred by his curse and by the rejection he has endured all his life. He feels unworthy of Tohru's love and is afraid of losing her.

Fruits Basket (2019) 15 vostfr is an episode that skillfully blends humor, drama, romance and fantasy, the ingredients behind the series' success. The humor comes mainly from the scenes where the Somas transform into animals, which creates comical and endearing situations. The drama comes from the revelations about the curse and the characters' pasts, which stir the viewer's emotion and compassion. The romance comes from the relationships between Tohru and the Somas, which are touching and full of tenderness. The fantasy comes from the mystery surrounding the curse and its origins, which intrigues and fascinates the viewer.

            \ No newline at end of file diff --git a/spaces/sub314xxl/MetaGPT/metagpt/memory/longterm_memory.py b/spaces/sub314xxl/MetaGPT/metagpt/memory/longterm_memory.py deleted file mode 100644 index 041d335acbac81ef5cd98aa158aa70600d62dec7..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/memory/longterm_memory.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Desc : the implement of Long-term memory -@Modified By: mashenquan, 2023/8/20. Remove global configuration `CONFIG`, enable configuration support for business isolation. -""" - -from metagpt.logs import logger -from metagpt.memory import Memory -from metagpt.memory.memory_storage import MemoryStorage -from metagpt.schema import Message - - -class LongTermMemory(Memory): - """ - The Long-term memory for Roles - - recover memory when it staruped - - update memory when it changed - """ - - def __init__(self): - self.memory_storage: MemoryStorage = MemoryStorage() - super(LongTermMemory, self).__init__() - self.rc = None # RoleContext - self.msg_from_recover = False - - def recover_memory(self, role_id: str, rc: "RoleContext"): - messages = self.memory_storage.recover_memory(role_id) - self.rc = rc - if not self.memory_storage.is_initialized: - logger.warning(f"It may the first time to run Agent {role_id}, the long-term memory is empty") - else: - logger.warning( - f"Agent {role_id} has existed memory storage with {len(messages)} messages " f"and has recovered them." - ) - self.msg_from_recover = True - self.add_batch(messages) - self.msg_from_recover = False - - def add(self, message: Message, **kwargs): - super(LongTermMemory, self).add(message) - for action in self.rc.watch: - if message.cause_by == action and not self.msg_from_recover: - # currently, only add role's watching messages to its memory_storage - # and ignore adding messages from recover repeatedly - self.memory_storage.add(message, **kwargs) - - def remember(self, observed: list[Message], k=0) -> list[Message]: - """ - remember the most similar k memories from observed Messages, return all when k=0 - 1. remember the short-term memory(stm) news - 2. 
integrate the stm news with ltm(long-term memory) news - """ - stm_news = super(LongTermMemory, self).remember(observed, k=k) # shot-term memory news - if not self.memory_storage.is_initialized: - # memory_storage hasn't initialized, use default `remember` to get stm_news - return stm_news - - ltm_news: list[Message] = [] - for mem in stm_news: - # integrate stm & ltm - mem_searched = self.memory_storage.search(mem) - if len(mem_searched) > 0: - ltm_news.append(mem) - return ltm_news[-k:] - - def delete(self, message: Message): - super(LongTermMemory, self).delete(message) - # TODO delete message in memory_storage - - def clear(self): - super(LongTermMemory, self).clear() - self.memory_storage.clean() diff --git a/spaces/sujitpal/clip-rsicd-demo/app.py b/spaces/sujitpal/clip-rsicd-demo/app.py deleted file mode 100644 index 57aacbf91fee67194acee98b30ff340a4e5bdc9e..0000000000000000000000000000000000000000 --- a/spaces/sujitpal/clip-rsicd-demo/app.py +++ /dev/null @@ -1,26 +0,0 @@ -import dashboard_text2image -import dashboard_image2image -import dashboard_featurefinder - -import streamlit as st - -PAGES = { - "Retrieve Images given Text": dashboard_text2image, - "Retrieve Images given Image": dashboard_image2image, - "Find Feature in Image": dashboard_featurefinder, -} - -st.sidebar.title("CLIP-RSICD") -st.sidebar.image("thumbnail.jpg") -st.sidebar.markdown(""" - We have fine-tuned the CLIP model (see [Model card](https://huggingface.co/flax-community/clip-rsicd-v2)) - using remote sensing images and captions from the [RSICD dataset](https://github.com/201528014227051/RSICD_optimal). - The CLIP model from OpenAI is trained in a self-supervised manner using contrastive learning to project images - and caption text onto a common embedding space. - - Please click here for [more information about our project](https://github.com/arampacha/CLIP-rsicd). - -""") -selection = st.sidebar.radio("Go to", list(PAGES.keys())) -page = PAGES[selection] -page.app() diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Cgs Civil 3d Tools Crack 34.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Cgs Civil 3d Tools Crack 34.md deleted file mode 100644 index 4d47785f7145ca3307d76a7383ad185a64c15ac9..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Cgs Civil 3d Tools Crack 34.md +++ /dev/null @@ -1,6 +0,0 @@ -

            cgs civil 3d tools crack 34


            DOWNLOAD ••• https://cinurl.com/2uEYDR




            diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Ashes Cricket 2009 Player Editor Free UPDATED Download.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Ashes Cricket 2009 Player Editor Free UPDATED Download.md deleted file mode 100644 index f57ed369754de3221cc76bb5d0cb68be3698bb81..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Ashes Cricket 2009 Player Editor Free UPDATED Download.md +++ /dev/null @@ -1,10 +0,0 @@ -

            Ashes Cricket 2009 Player Editor Free Download


            Download ——— https://urluss.com/2uCGqG



November 1, 2019 - You must use the player editor. Download the latest version of the sound driver for free with a program that searches for drivers automatically, and install or update the drivers for your computer or laptop's sound card.
November 3, 2019 - You can download the sound driver from our website quickly, simply and for free, using the automatic driver search program.
January 17, 2019 - You can download the sound driver from our website quickly, easily and for free.

            diff --git a/spaces/szukevin/VISOR-GPT/train/pretrain.py b/spaces/szukevin/VISOR-GPT/train/pretrain.py deleted file mode 100644 index 5b94b4041d3fd528e82ca75766cd135a2d16cf33..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/pretrain.py +++ /dev/null @@ -1,121 +0,0 @@ -import argparse -import torch -import tencentpretrain.trainer as trainer -from tencentpretrain.utils.config import load_hyperparam -from tencentpretrain.opts import * - - -def main(): - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - # Path options. - parser.add_argument("--dataset_path", type=str, default="dataset.pt", - help="Path of the preprocessed dataset.") - parser.add_argument("--pretrained_model_path", type=str, default=None, - help="Path of the pretrained model.") - parser.add_argument("--output_model_path", type=str, required=True, - help="Path of the output model.") - parser.add_argument("--config_path", type=str, default="models/bert/base_config.json", - help="Config file of model hyper-parameters.") - - # Training and saving options. - parser.add_argument("--total_steps", type=int, default=100000, - help="Total training steps.") - parser.add_argument("--save_checkpoint_steps", type=int, default=10000, - help="Specific steps to save model checkpoint.") - parser.add_argument("--report_steps", type=int, default=100, - help="Specific steps to print prompt.") - parser.add_argument("--accumulation_steps", type=int, default=1, - help="Specific steps to accumulate gradient.") - parser.add_argument("--batch_size", type=int, default=32, - help="Training batch size. The actual batch_size is [batch_size x world_size x accumulation_steps].") - parser.add_argument("--instances_buffer_size", type=int, default=25600, - help="The buffer size of instances in memory.") - parser.add_argument("--labels_num", type=int, required=False, - help="Number of prediction labels.") - parser.add_argument("--dropout", type=float, default=0.1, help="Dropout value.") - parser.add_argument("--seed", type=int, default=7, help="Random seed.") - - # Preprocess options. - tokenizer_opts(parser) - tgt_tokenizer_opts(parser) - - # Model options. - model_opts(parser) - parser.add_argument("--data_processor", - choices=["bert", "lm", "mlm", "bilm", "albert", "mt", "t5", "cls", - "prefixlm", "gsg", "bart", "cls_mlm", "vit", "vilt", "clip", "s2t", "beit", "dalle"], default="bert", - help="The data processor of the pretraining model.") - parser.add_argument("--deep_init", action="store_true", - help="Scaling initialization of projection layers by a " - "factor of 1/sqrt(2N). Necessary to large models.") - - # Masking options. - parser.add_argument("--whole_word_masking", action="store_true", help="Whole word masking.") - parser.add_argument("--span_masking", action="store_true", help="Span masking.") - parser.add_argument("--span_geo_prob", type=float, default=0.2, - help="Hyperparameter of geometric distribution for span masking.") - parser.add_argument("--span_max_length", type=int, default=10, - help="Max length for span masking.") - - # Optimizer options. - optimization_opts(parser) - - # GPU options. - parser.add_argument("--world_size", type=int, default=1, help="Total number of processes (GPUs) for training.") - parser.add_argument("--gpu_ranks", default=[], nargs='+', type=int, help="List of ranks of each process." 
- " Each process has a unique integer rank whose value is in the interval [0, world_size), and runs in a single GPU.") - parser.add_argument("--master_ip", default="tcp://localhost:12345", type=str, help="IP-Port of master for training.") - parser.add_argument("--backend", choices=["nccl", "gloo"], default="nccl", type=str, help="Distributed backend.") - - # Deepspeed options. - deepspeed_opts(parser) - - # Log options. - log_opts(parser) - - args = parser.parse_args() - - if "cls" in args.target: - assert args.labels_num is not None, "Cls target needs the denotation of the number of labels." - - # Load hyper-parameters from config file. - if args.config_path: - args = load_hyperparam(args) - - ranks_num = len(args.gpu_ranks) - - if args.deepspeed: - if args.world_size > 1: - args.dist_train = True - else: - args.dist_train = False - else: - if args.world_size > 1: - # Multiprocessing distributed mode. - assert torch.cuda.is_available(), "No available GPUs." - assert ranks_num <= args.world_size, "Started processes exceed `world_size` upper limit." - assert ranks_num <= torch.cuda.device_count(), "Started processes exceeds the available GPUs." - args.dist_train = True - args.ranks_num = ranks_num - print("Using distributed mode for training.") - elif args.world_size == 1 and ranks_num == 1: - # Single GPU mode. - assert torch.cuda.is_available(), "No available GPUs." - args.gpu_id = args.gpu_ranks[0] - assert args.gpu_id < torch.cuda.device_count(), "Invalid specified GPU device." - args.dist_train = False - args.single_gpu = True - print("Using GPU %d for training." % args.gpu_id) - else: - # CPU mode. - assert ranks_num == 0, "GPUs are specified, please check the arguments." - args.dist_train = False - args.single_gpu = False - print("Using CPU mode for training.") - - trainer.train_and_validate(args) - - -if __name__ == "__main__": - main() diff --git a/spaces/tappyness1/error_analysis_obj_det/app.py b/spaces/tappyness1/error_analysis_obj_det/app.py deleted file mode 100644 index 57b207cc4248a1d780b9e09767afa3103e8b9ada..0000000000000000000000000000000000000000 --- a/spaces/tappyness1/error_analysis_obj_det/app.py +++ /dev/null @@ -1,65 +0,0 @@ -import streamlit as st -from src.st_image_tools import ImageTool - -def call_in_image_tool(cfg_path): - image_tool = ImageTool(cfg_path) - return image_tool - -def main(cfg_path="cfg/cfg.yml"): - """_summary_ - - Args: - cfg_path (str, optional): _description_. Defaults to "cfg/cfg.yml". 
- - Returns: - None: the app renders its output directly via Streamlit. - """ - st.set_page_config(layout="wide") - - st.markdown( - """ """, - unsafe_allow_html=True, - ) - - image_tool = call_in_image_tool(cfg_path) - - # Select Plot Option - # st.sidebar.markdown("Checkboxes") - # checkbox_one = st.sidebar.checkbox("Show Image", value=True) # rename as necessary - checkbox_two = st.sidebar.checkbox("Show Inference", value=True) - checkbox_three = st.sidebar.checkbox("Show Ground Truth", value=True) - checkbox_four = st.sidebar.checkbox("Show Side by Side (GT and Pred)", value=True) - - option = st.sidebar.selectbox("Select Image", image_tool.all_img) - - if checkbox_two: - - if checkbox_three: - if checkbox_four: - image_tool.plot_with_preds_gt(option=option, side_by_side=True) - else: - image_tool.plot_with_preds_gt(option=option, plot_type="all") - - else: - image_tool.plot_with_preds_gt(option=option, plot_type="pred") - - elif checkbox_three: - # checkbox_two is False on this branch, so only the ground truth can be drawn. - image_tool.plot_with_preds_gt(option=option, plot_type="gt") - - else: - image_tool.plot_with_preds_gt(option=option) - - -if __name__ == "__main__": - main() \ No newline at end of file
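The checkbox dispatch in the deleted app.py above collapses to four cases: prediction and/or ground-truth overlays, with an optional side-by-side split when both are shown. A minimal, behavior-equivalent sketch; the helper name `render_plot` is hypothetical, and only the `ImageTool.plot_with_preds_gt` signature used in the file is assumed:

```python
def render_plot(image_tool, option, show_pred, show_gt, side_by_side):
    # Mirror the four checkbox combinations from the app above.
    if show_pred and show_gt:
        # Both overlays requested: split panes or draw both on one plot.
        if side_by_side:
            image_tool.plot_with_preds_gt(option=option, side_by_side=True)
        else:
            image_tool.plot_with_preds_gt(option=option, plot_type="all")
    elif show_pred:
        image_tool.plot_with_preds_gt(option=option, plot_type="pred")
    elif show_gt:
        image_tool.plot_with_preds_gt(option=option, plot_type="gt")
    else:
        # Neither overlay: fall back to the bare image.
        image_tool.plot_with_preds_gt(option=option)

# e.g. render_plot(image_tool, option, checkbox_two, checkbox_three, checkbox_four)
```

Flattening the nesting this way removes the unreachable inner branch and makes each plot type correspond to exactly one condition.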


            diff --git a/spaces/terfces0erbo/CollegeProjectV2/Ansoft Designer 5 Crack Download.md b/spaces/terfces0erbo/CollegeProjectV2/Ansoft Designer 5 Crack Download.md deleted file mode 100644 index 090ab30e9a368012307dc71ee9ce94516c9ebb55..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Ansoft Designer 5 Crack Download.md +++ /dev/null @@ -1,46 +0,0 @@ -

            Ansoft Designer 5 Crack Download


            Download ->>->>->> https://bytlly.com/2uGiFP



            -
            -Ansoft was able to fulfill this objective with a single module based on the SWIG scripting language . The approach was adopted by other organizations as well . - -Siemens produced a 30 MHz radio frequency (RF) analysis system called ScanMax . - -X-SCAN  is a software based RF engineering platform which is developed and maintained by the ETS group. It is the 3D RF tool that is typically installed in the workstation of a designer or technician. The X-SCAN RF engineering platform enables RF engineers and technicians to create and run simulations as well as perform measurements to analyze the RF propagation environment of a specific scenario. - -See also - - X-COMPARE - -References - -External links - - (PDF). 2006 IEEE/APS Topical Conference on Microwave Engineering & Technology. IEEE/APS Convention & Exhibition Center. 6 February - 7 February 2006 - - (PDF). 2006 IEEE/APS Microwave Conference. Institute for Electrical and Electronic Engineers, IEEE, and the American Physical Society. March 2006 - - (PDF). 2006 Third IEEE/IEEE-USA Semiconductor Technology and Science Conference, Proceedings. Institute for Electrical and Electronics Engineers, IEEE, and IEEE Solid-State Circuits Conference, IEEE. May 2006 - - (PDF). 2005 IEEE International Conference on Communication (ICC). Proceedings. IEEE, Institute for Electrical and Electronic Engineers, and the IEEE Microwave Theory and Techniques Society. June 2005 - - (PDF). 2004 IEEE International Conference on Communications (ICC). Proceedings. Institute for Electrical and Electronic Engineers, IEEE, and IEEE Circuits and Devices Society. June 2004 - - (PDF). 2003 IEEE International Conference on Communications (ICC). Proceedings. Institute for Electrical and Electronic Engineers, IEEE, and IEEE Circuits and Devices Society. June 2003 - - (PDF). 2001 IEEE International Conference on Communications (ICC). Proceedings. Institute for Electrical and Electronic Engineers, IEEE, and IEEE Microwave Theory and Techniques Society. June 2001 - - (PDF). 2000 IEEE International Conference on Communications (ICC). Proceedings. Institute for Electrical and Electronic Engineers, IEEE, and IEEE Microwave Theory and Techniques Society. June 2000 - -Category:Electromagnetics - -Category:Numerical software - -Category:Electric and magnetic fields in physics - -Category:Electromagnetic radiation - -Category:Spectral densities - -Category:Analog electronicsLenny Kravitz on his new album, 'Love & War': "I'm pushing the envelope as far as I 4fefd39f24
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/Bundesliga Manager Hattrick Download Kostenlos.md b/spaces/terfces0erbo/CollegeProjectV2/Bundesliga Manager Hattrick Download Kostenlos.md deleted file mode 100644 index f41d234ed3db51b5822c2e7826e89afbfb5d0a30..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Bundesliga Manager Hattrick Download Kostenlos.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Bundesliga Manager Hattrick Download Kostenlos


            Download > https://bytlly.com/2uGiLe



            - -Bundesliga-Manager, Hattrick, Anstoss (!), EA Fussballmanager und nun, ... bei Epic Games und Steam als auch kostenlos für jeden erhältlich, ... 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/Counter-Strike Global Offensive V 1.22.2.8- AviaRa - Fitgirl Repack [PORTABLE].md b/spaces/terfces0erbo/CollegeProjectV2/Counter-Strike Global Offensive V 1.22.2.8- AviaRa - Fitgirl Repack [PORTABLE].md deleted file mode 100644 index e63da3057a476e291dcba9e2d41e43174b7974c0..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Counter-Strike Global Offensive V 1.22.2.8- AviaRa - Fitgirl Repack [PORTABLE].md +++ /dev/null @@ -1,13 +0,0 @@ -

            Counter-Strike Global Offensive V 1.22.2.8- AviaRa - Fitgirl Repack


            Download File ☆☆☆ https://bytlly.com/2uGiW1



            -
            -CS GO Counter-Strike Global Offensive v.1.35.1.4 - No Steam.exe 16208377 . Counter Strike Source Full-Game MP-SP v_1 0 0 75-=AviaRa=-. executable file 26279346. Counter-Strike: Source (CSS) The latest version of the Counter-Strike model for CS 1. 6 - Counter-Strike. -Counter-Strike Source (CS: Source) is one of the latest versions of the Counter Strike game. -Counter Strike v. 1.6 [RUS] Counter-Strike: Source v. 68[L. U.S.O.] Counter-Strike: Source v. 68[L. U.S.O.] - torrent. -Description: If you. -Counter-Strike: Origin v. 68[L. U.S.O.] Counter-Strike: Source v. 68[LL. -US. -O. ] [RUS] Counter-Strike: Source v. 68[LL. -O.] - torrent 8a78ff9644
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/Crack Keygen AutoCAD Mechanical 2013 Download TOP.md b/spaces/terfces0erbo/CollegeProjectV2/Crack Keygen AutoCAD Mechanical 2013 Download TOP.md deleted file mode 100644 index 98cec479cb4c6fc9170385b0265a98e47eb0adeb..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Crack Keygen AutoCAD Mechanical 2013 Download TOP.md +++ /dev/null @@ -1,6 +0,0 @@ -

            crack Keygen AutoCAD Mechanical 2013 download


            Download ····· https://bytlly.com/2uGkNK



            -
            -Xforce,keygen,autocad,mechanical,2008,3ds,max.,3ds,max,2013.... 545 download X-64-bit AutoCAD 6432 autocad 2008 keygen 64 bit. 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Alan Dean Foster Epub !FULL! Download Forum.md b/spaces/tialenAdioni/chat-gpt-api/logs/Alan Dean Foster Epub !FULL! Download Forum.md deleted file mode 100644 index 9855626281107d2bf3745e8abbf795c57b0ce0e8..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Alan Dean Foster Epub !FULL! Download Forum.md +++ /dev/null @@ -1,22 +0,0 @@ -
            -

            How to Download Alan Dean Foster's Ebooks for Free

            -

            Alan Dean Foster is a prolific and versatile author of fantasy and science fiction novels, as well as novelizations of popular movies such as Star Wars, Alien, and The Thing. He has written over 270 works, many of which are available in ebook format. If you are a fan of his books, or if you want to discover his amazing stories, you might be wondering how to download his ebooks for free.

            -

            Alan Dean Foster Epub Download Forum


            DOWNLOAD ---> https://urlcod.com/2uK7He



            -

            In this article, we will show you some of the best websites where you can find and download Alan Dean Foster's ebooks for free. These websites are legal and safe, and they offer a wide range of formats, such as epub, mobi, pdf, and more. You will also learn some tips on how to optimize your ebook reading experience and enjoy Foster's books to the fullest.

            -

            Open Library

            -

            Open Library is a project of the Internet Archive that aims to create a web page for every book ever published. It has over 20 million books in its catalog, including many of Alan Dean Foster's works. You can borrow up to 10 ebooks at a time for 14 days each, or download them if they are in the public domain. You can also read them online using the Open Library reader.

            -

            To find Alan Dean Foster's ebooks on Open Library, you can use the search bar or go to his author page[^1^]. There you can browse his books by title, date, edition, language, and more. You can also see which books are available for borrowing or downloading. To borrow or download a book, you need to create a free account and log in. Then you can click on the "Borrow" or "Download" button next to the book you want.

            -

            Internet Archive

            -

            The Internet Archive is a non-profit library that preserves and provides access to millions of digital items, such as books, movies, music, websites, and more. It also hosts many ebooks that are in the public domain or have been uploaded by users. You can download these ebooks for free in various formats, such as epub, mobi, pdf, txt, and more.

            -

            To find Alan Dean Foster's ebooks on the Internet Archive, you can use the search bar or go to his collection page[^2^]. There you can see all the ebooks that have been uploaded by users with his name in the title or metadata. You can also filter them by year, language, topic, collection, and more. To download an ebook, you just need to click on the "Download Options" button next to it and choose your preferred format.

            -

            -

            OverDrive

            -

            OverDrive is a service that allows you to borrow ebooks and audiobooks from your local library or school. It has over 5 million titles from thousands of publishers, including many of Alan Dean Foster's books. You can read or listen to these books on your computer, smartphone, tablet, or e-reader using the OverDrive app or website.

            -

            To find Alan Dean Foster's ebooks on OverDrive, you need to have a valid library card or student ID from a participating library or school. Then you can use the search bar or go to his author page[^3^]. There you can see all the books that are available for borrowing from your library or school. You can also filter them by format, language, genre, availability, and more. To borrow an ebook, you need to sign in with your library card or student ID and click on the "Borrow" button next to the book you want.

            -

            Tips for Reading Ebooks

            -

            Now that you know how to download Alan Dean Foster's ebooks for free, here are some tips on how to optimize your ebook reading experience:

            -
              -
            • Choose the right format for your device. Epub is a common and flexible format that works on most devices and apps. Mobi is compatible with Kindle devices and apps. Pdf is good for preserving the layout and graphics of a book but may not be very readable on small screens.
            • -
            • Use a good ebook reader app or device. There are many options available for different platforms and preferences. Some popular ones are Kindle

              -
              -
              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Calculus James Stewart Metric International 7th Edition Pdf.rar.md b/spaces/tialenAdioni/chat-gpt-api/logs/Calculus James Stewart Metric International 7th Edition Pdf.rar.md deleted file mode 100644 index c2a387bb04eb8748dae85cebe2a096ddab34edc6..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Calculus James Stewart Metric International 7th Edition Pdf.rar.md +++ /dev/null @@ -1,26 +0,0 @@ - -

              Calculus James Stewart Metric International 7th Edition Pdf.rar: A Comprehensive Review

              -

              Calculus is one of the most widely used and influential textbooks in mathematics. Written by James Stewart, a renowned mathematician and educator, the book covers all the topics of calculus from precalculus to multivariable calculus and differential equations. The book is known for its clear and precise exposition, engaging examples and applications, and rigorous exercises.

              -

              Calculus James Stewart Metric International 7th Edition Pdf.rar


              Download File ⇒⇒⇒ https://urlcod.com/2uK8SC



              -

              The 7th edition of Calculus was published in 2012 by Brooks Cole. It features new content, updated examples, enhanced graphics, and improved online resources. The Metric International version of the book uses the metric system of units and notation, as well as international examples and data. The Pdf.rar format is a compressed file that contains the PDF version of the book, which can be opened with any PDF reader software.

              -

              In this article, we will review the main features, strengths, and weaknesses of Calculus James Stewart Metric International 7th Edition Pdf.rar. We will also provide some tips on how to use the book effectively for learning and teaching calculus.

              -

              Main Features

              -

              Calculus James Stewart Metric International 7th Edition Pdf.rar has the following main features:

              -

              -
                -
              • It covers all the essential topics of calculus in a logical and coherent order. The book is divided into 17 chapters, each with several sections. The chapters are grouped into five parts: Functions and Models; Limits and Derivatives; Integrals; Techniques of Integration; Applications of Integration; Inverse Functions; Techniques of Integration; Further Applications of Integration; Differential Equations; Parametric Equations and Polar Coordinates; Infinite Sequences and Series; Vectors and the Geometry of Space; Vector Functions; Partial Derivatives; Multiple Integrals; Vector Calculus; Second-Order Differential Equations.
              • -
              • It provides clear and precise explanations of concepts and methods. The book uses a balanced approach that combines intuition, algebra, geometry, and analysis. The book also uses a variety of examples and applications to illustrate and motivate the concepts. The examples range from simple to challenging, from theoretical to practical, from classical to modern.
              • -
              • It offers a wealth of exercises for practice and assessment. The book contains more than 8,000 exercises of varying difficulty levels and types. The exercises include basic skills, conceptual understanding, problem solving, modeling, writing, projects, explorations, and review. The book also provides answers to selected exercises at the end of each chapter.
              • -
              • It incorporates technology as a tool for learning and teaching calculus. The book integrates the use of graphing calculators, computer algebra systems, spreadsheets, and online resources throughout the text. The book also provides guidance on how to use technology appropriately and effectively for calculus.
              • -
              • It supports students' learning with online resources. The book comes with access to Enhanced WebAssign, an online homework and learning system that provides feedback, hints, videos, animations, simulations, tutorials, quizzes, tests, and more. The book also comes with access to CalcChat.com, an online chat service that provides step-by-step solutions to odd-numbered exercises.
              • -
              -

              Strengths

              -

              Calculus James Stewart Metric International 7th Edition Pdf.rar has the following strengths:

              -
                -
              • It is comprehensive and rigorous. The book covers all the topics that are typically taught in a calculus course at a university level. The book also maintains a high standard of mathematical accuracy and rigor throughout.
              • -
              • It is clear and accessible. The book uses a clear and concise language that is easy to follow and understand. The book also uses a consistent notation and terminology that avoids confusion.
              • -
              • It is engaging and relevant. The book uses a variety of examples and applications that are interesting and relevant to students' lives and fields of study. The book also connects calculus to other areas of mathematics and science.
              • -
              • It is flexible and adaptable. The book can be used for different courses, levels, styles, and preferences of teaching and learning calculus. The book also allows instructors and students to choose the topics, methods, exercises, and technology that suit their needs.
              • -
              • It is reliable and convenient. The book is available in a digital format that can be easily downloaded, stored, accessed, printed, or shared. The book also comes

                -
                -
                \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download Crazy Kitchen and Customize Your Game with Photos of Your Friends and Family.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download Crazy Kitchen and Customize Your Game with Photos of Your Friends and Family.md deleted file mode 100644 index 9b3dc8cac968f21b440e4959dd71c43bb35fc626..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download Crazy Kitchen and Customize Your Game with Photos of Your Friends and Family.md +++ /dev/null @@ -1,37 +0,0 @@ - -

                Download Crazy Kitchen: A Fun and Addictive Match-3 Cooking Game

                -

                If you love cooking and puzzle games, you will enjoy Crazy Kitchen, a match-3 game that lets you cook and serve delicious dishes from around the world. You can also add photos of your friends, family, and pets to customize your game experience. In this article, we will tell you how to download Crazy Kitchen for Android and iOS devices, and what are some of the features and tips of this game.

                -

                download crazy kitchen


                DOWNLOAD ✓✓✓ https://urlcod.com/2uK9eR



                -

                How to Download Crazy Kitchen for Android and iOS

                -

                Crazy Kitchen is available for free on both Google Play Store and App Store. You can download it by following these steps:

                -
                  -
                • For Android devices, go to Google Play Store and search for Crazy Kitchen: Match 3 Puzzles. Alternatively, you can use this link to access the game page directly.
                • -
                • For iOS devices, go to App Store and search for Crazy Kitchen: Cooking Games. Alternatively, you can use this link to access the game page directly.
                • -
                • Tap on the Install or Get button to start downloading the game.
                • -
                • Once the download is complete, open the game and enjoy!
                • -
                -

                What are the Features of Crazy Kitchen

                -

                Crazy Kitchen is a fun and addictive match-3 game that combines cooking and puzzle elements. Here are some of the features of this game:

                -
                  -
                • You can cook and serve food as an international chef, from sushi to pizza, burgers to cupcakes, and more.
                • -
                • You can match three or more ingredients of the same color to create tasty combos and clear the board.
                • -
                • You can use boosters and power-ups to help you overcome challenging levels and earn stars.
                • -
                • You can add photos of your friends, family, pets, and more to make them your customers in the game. You can also share your photos with other players.
                • -
                • You can play over 500 levels with different themes and cuisines, and unlock new restaurants and locations.
                • -
                • You can connect with Facebook to play with your friends and compete for high scores.
                • -
                -

                What are some Tips for Playing Crazy Kitchen

                -

                Crazy Kitchen is a game that requires strategy and skill to master. Here are some tips that can help you improve your gameplay:

                -

                -
                  -
                • Pay attention to the customers' orders and preferences. Some customers may have allergies or dislikes that you need to avoid.
                • -
                • Try to create as many matches as possible in one move. This will increase your score and fill up your fever meter faster.
                • -
                • Use the fever mode wisely. When the fever meter is full, you can activate it to clear all the ingredients of one color from the board.
                • -
                • Save your boosters and power-ups for difficult levels. You can use them to clear obstacles, create special ingredients, or shuffle the board.
                • -
                • Complete daily quests and achievements to earn coins and rewards. You can use coins to buy more boosters or lives.
                • -
                • Follow Crazy Kitchen on Facebook to get updates, news, tips, and free gifts.
                • -
                -

                Conclusion

                -

                Crazy Kitchen is a match-3 cooking game that will keep you entertained for hours. You can download it for free on your Android or iOS device and enjoy cooking and serving food from different cuisines. You can also customize your game with photos of your loved ones and share them with other players. If you are looking for a fun and addictive puzzle game with a culinary twist, Crazy Kitchen is the game for you!

                -
                -
                \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download Zapatlela Part 1 In Hindi 720p.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download Zapatlela Part 1 In Hindi 720p.md deleted file mode 100644 index 6bcccaadc7c49ddf53d5704c5d0f4c0b8a6dafa8..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download Zapatlela Part 1 In Hindi 720p.md +++ /dev/null @@ -1,14 +0,0 @@ - -

                Zapatlela: A Horror Comedy Movie That Will Make You Laugh and Scream

                -

                Zapatlela is a 1993 Marathi horror comedy movie directed by Mahesh Kothare and starring Laxmikant Berde, Dilip Prabhavalkar, Mahesh Kothare, Vijay Chavan and others. The movie is about a gangster's spirit that possesses a puppet and causes havoc in the life of a puppeteer. The movie was a huge hit and is considered one of the best Marathi movies of all time.

                -

                download Zapatlela part 1 in hindi 720p


                DOWNLOAD ››› https://urlcod.com/2uKaol



                -

                If you want to watch Zapatlela online, you can stream it on ZEE5[^1^] or Fijo[^2^]. However, if you want to download Zapatlela part 1 in Hindi 720p, you might have a hard time finding a reliable source. There are many websites that claim to offer Zapatlela part 1 in Hindi 720p download, but most of them are either fake or illegal. Some of them might even contain viruses or malware that can harm your device.

                -

                Therefore, we advise you to avoid downloading Zapatlela part 1 in Hindi 720p from any unauthorized website. Instead, you can watch Zapatlela online legally and safely on ZEE5[^1^] or Fijo[^2^]. You can also buy or rent Zapatlela on DVD or Blu-ray from Amazon or Flipkart. By doing so, you will not only enjoy the movie in high quality, but also support the filmmakers and artists who worked hard to make this movie.

                -

                Zapatlela is a movie that you should not miss if you love horror comedy. It has a perfect blend of humor, suspense, action and drama. The puppet Tatya Vinchu, played by Dilip Prabhavalkar, is one of the most iconic characters in Marathi cinema. The movie also has some memorable songs and dialogues that will make you laugh out loud. So, what are you waiting for? Watch Zapatlela online or download it legally and enjoy this masterpiece.

                Zapatlela was followed by a sequel, Zapatlela 2, in 2013. The sequel was also directed by Mahesh Kothare and featured some of the original cast members along with new ones. The sequel was also a success and received positive reviews from critics and audiences. The sequel continued the story of Tatya Vinchu and his quest to become human again.

                -

                -

                Zapatlela is not only a popular movie in Marathi, but also in other languages. The movie was dubbed in Hindi as Khilona Bana Khalnayak and in Telugu as Brahmanandam Drama Company. The movie also inspired a Kannada remake, Katari Veera Surasundarangi, starring Upendra and Ramya. The remake was also a hit and was praised for its visual effects and comedy.

                -

                Zapatlela is a movie that has a cult following among the fans of horror comedy. The movie has been praised for its originality, creativity and entertainment value. The movie has also been appreciated for its technical aspects, such as the puppetry, cinematography, editing and sound design. The movie has won several awards and accolades, such as the Maharashtra State Film Award for Best Popular Film Providing Wholesome Entertainment, the Screen Award for Best Marathi Film and the Zee Gaurav Award for Best Director.

                If you are looking for a movie that will make you laugh and scream at the same time, Zapatlela is the perfect choice for you. Zapatlela is a movie that has something for everyone. It has comedy, horror, action, drama, romance and music. It has a gripping story, engaging characters, witty dialogues and catchy songs. It has a brilliant performance by Dilip Prabhavalkar as Tatya Vinchu, who steals the show with his hilarious antics and expressions. It has a talented cast of actors who deliver their roles with conviction and charm. It has a talented director who knows how to balance the different elements of the movie and create a masterpiece.

                -

                Zapatlela is a movie that you will never get bored of watching. It is a movie that will make you laugh till your stomach hurts and make you jump out of your seat. It is a movie that will entertain you from start to finish. It is a movie that will make you fall in love with Tatya Vinchu and his puppet friends. It is a movie that will make you appreciate the art of puppetry and the magic of cinema. It is a movie that will make you proud of being a Marathi.

                -

                Zapatlela is a movie that you should watch right now. You can stream it on ZEE5 or Fijo, or download it legally from Amazon or Flipkart. You can also watch the sequel, Zapatlela 2, which is equally amazing and fun. You can also watch the dubbed or remade versions of the movie in other languages. But whatever you do, don't miss this movie. Zapatlela is a movie that will make your day.

                -
                -
                \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Find and Enjoy the Best Software and Games from I Cng Ngh.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Find and Enjoy the Best Software and Games from I Cng Ngh.md deleted file mode 100644 index 74ab9772d9789c15cf593dcde54f0c8aad61f292..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Find and Enjoy the Best Software and Games from I Cng Ngh.md +++ /dev/null @@ -1,23 +0,0 @@ - -

                How to Download and Install Software and Games from I Công Nghệ

                -

                I Công Nghệ is a website that shares software, games, graphics, technology, and tips and tricks for various applications. You can find a lot of useful and interesting content on this site, such as Adobe products, Autodesk products, 3D modeling tools, video editing tools, audio editing tools, development tools, engineering tools, construction tools, and more. You can also download games that are translated into Vietnamese or have Vietnamese subtitles.

                -

                icongnghe


                Download Zip ⇒⇒⇒ https://urlcod.com/2uK7zX



                -

                If you want to download and install software and games from I Công Nghệ, you need to follow these steps:

                -
                  -
                1. Go to the website https://ycongnghe.com/ and browse the categories or use the search box to find what you are looking for.
                2. -
                3. Click on the title of the software or game that you want to download. You will see a detailed description of the product, its features, system requirements, screenshots, and download links.
                4. -
                5. Choose a download link that suits your preference. Some links are from Fshare, which is a stable and fast file hosting service in Vietnam. Some links are from other sources that may require you to skip ads or wait for a countdown before downloading. You may also need to enter a password to access some files. The password is usually 321 or icongnghe.com.
                6. -
                7. After downloading the file, you need to extract it using a tool like WinRAR or 7-Zip. You will get a folder that contains the setup file and the crack file or the activation key.
                8. -
                9. Run the setup file and follow the instructions to install the software or game on your computer. You may need to disable your antivirus or firewall temporarily to avoid any interference.
                10. -
                11. After installing the software or game, you need to activate it using the crack file or the activation key. The crack file is usually a .dll file that you need to copy and paste into the installation folder of the software or game. The activation key is usually a serial number or a code that you need to enter when prompted by the software or game.
                12. -
                13. Enjoy your software or game!
                14. -
                -

                I hope this article was helpful for you. If you have any questions or problems, you can contact I Công Nghệ through their Facebook page https://www.facebook.com/icongnghe/ or their VK page https://vk.com/icongnghe. They are very responsive and friendly. You can also leave a comment on their website or share your feedback with them.

                -

                Thank you for reading and have a nice day!

                - -

                Some of the software and games that you can download from I Công Nghệ are very popular and widely used by professionals and enthusiasts. For example, you can download Adobe Photoshop, Illustrator, Premiere Pro, After Effects, and other creative tools that allow you to edit photos, create graphics, make videos, and more. You can also download Autodesk AutoCAD, 3ds Max, Maya, Revit, and other engineering and design tools that enable you to create 2D and 3D models, drawings, animations, and simulations. You can also download development tools like Visual Studio, Eclipse, PyCharm, and others that help you code and program various applications and software.

                -

                -

                Some of the games that you can download from I Công Nghệ are very fun and entertaining. For example, you can download Neighbours From Hell 1- Gã hàng xóm tinh nghịch Việt Hoá, which is a game where you have to prank your annoying neighbor in various ways. You can also download games like GTA V, Assassin's Creed Valhalla, Cyberpunk 2077, Resident Evil Village, and others that offer you immersive and thrilling gameplay experiences. You can also download games that are suitable for children and families, such as Minecraft, The Sims 4, Plants vs Zombies, and others that are colorful and cute.

                -

                I Công Nghệ is a great website for anyone who loves software and games. You can find a lot of useful and interesting content on this site that can help you learn new skills, improve your productivity, express your creativity, or just have fun. You can also support I Công Nghệ by liking their Facebook page or VK page, subscribing to their Fshare account, or donating to them via PayPal or bank transfer. You can also share their website with your friends and family who may be interested in their content.

                -
                -
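The next deleted file, utils/gradio.py, normalizes raw model output into display-safe markdown before it reaches the chat UI. A minimal usage sketch of that conversion; the import path and sample strings are assumptions for illustration, not part of the original file:

```python
# Hypothetical usage of convert_to_markdown from the utils/gradio.py below.
from utils.gradio import convert_to_markdown

raw = "def f(x):\n\treturn x\n# not a heading\nPrice: $5"
print(convert_to_markdown(raw))
# Leading tabs/spaces become HTML entities, "$" becomes "&#36;" so it is not
# treated as a math delimiter, and the leading "#" is escaped so the line
# does not render as a heading.
```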
\ No newline at end of file diff --git a/spaces/tianyang/lemur-7B/utils/gradio.py b/spaces/tianyang/lemur-7B/utils/gradio.py deleted file mode 100644 index ce6ec237ab5c517522cd8e7f132c0d9179509696..0000000000000000000000000000000000000000 --- a/spaces/tianyang/lemur-7B/utils/gradio.py +++ /dev/null @@ -1,71 +0,0 @@ -import gradio as gr -from utils.inference import shared_state -import re - -def convert_to_markdown(text): - # Escape "$" as its HTML entity so stray dollar signs are not parsed as math delimiters. - text = text.replace("$", "&#36;") - - def replace_leading_tabs_and_spaces(line): - # Preserve leading indentation by swapping tabs/spaces for HTML entities. - new_line = [] - - for char in line: - if char == "\t": - new_line.append("&#9;") - elif char == " ": - new_line.append("&nbsp;") - else: - break - return "".join(new_line) + line[len(new_line) :] - - markdown_text = "" - lines = text.split("\n") - in_code_block = False - - for line in lines: - if not in_code_block and line.startswith("```"): - in_code_block = True - markdown_text += "```\n" - elif in_code_block and line.startswith("```"): - in_code_block = False - markdown_text += "```\n" - elif in_code_block: - markdown_text += f"{line}\n" - else: - line = replace_leading_tabs_and_spaces(line) - line = re.sub(r"^(#)", r"\\\1", line) # escape a leading "#" so it is not rendered as a heading - markdown_text += f"{line}  \n" # two trailing spaces force a markdown line break - - return markdown_text - -def reset_textbox(): - return gr.update(value=""), "" - -def cancel_outputing(): - # Signal the generation loop to stop. - shared_state.interrupt() - return "Stop Done" - -def reset_state(): - return [], [], "Reset Done" - -def transfer_input(inputs): - # Move the user input into the chat flow, clear the textbox, and reveal the action buttons. - return ( - inputs, - gr.update(value=""), - gr.Button.update(visible=True), - gr.Button.update(visible=True) - ) - -def delete_last_conversation(chatbot, history): - if len(chatbot) > 0: - chatbot.pop() - - if len(history) > 0: - history.pop() - - return ( - chatbot, - history, - "Delete Done", - ) \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/8 Ball Pool Shooter APK A Fun and Challenging Pool Game for All Skill Levels.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/8 Ball Pool Shooter APK A Fun and Challenging Pool Game for All Skill Levels.md deleted file mode 100644 index 2d1b155979dab0d75cc82d12e819d3067985cbe..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/8 Ball Pool Shooter APK A Fun and Challenging Pool Game for All Skill Levels.md +++ /dev/null @@ -1,109 +0,0 @@ -

                8 Ball Pool Shooter APK: A Fun and Easy Way to Play Pool on Your Phone

                -

                If you love playing pool but don't have access to a real table, you can still enjoy the game on your phone with 8 Ball Pool Shooter APK. This is a popular pool game app that lets you play with millions of players around the world. You can challenge your friends, compete in tournaments, or practice your skills in solo mode. You can also customize your cue, avatar, and table to suit your preferences. In this article, we will tell you everything you need to know about 8 Ball Pool Shooter APK, including its features, how to download and install it, and its pros and cons.

                -

                8 ball pool shooter apk


                Download ››››› https://bltlly.com/2uOp5f



                -

                What is 8 Ball Pool Shooter APK?

                -

                8 Ball Pool Shooter APK is an Android application that allows you to play pool on your phone. It is developed by Miniclip, a leading online game company that also created other popular games like Agar.io, Soccer Stars, and Basketball Stars. 8 Ball Pool Shooter APK is based on the classic pool game, where you have to pot all your balls before your opponent does. You can choose from different game modes, such as 8 ball, 9 ball, or no guidelines. You can also play with different rules, such as call pocket or no call pocket.

                -

                Features of 8 Ball Pool Shooter APK

                -

                8 Ball Pool Shooter APK has many features that make it fun and easy to play. Here are some of them:

                -

                Multiplayer and PvP mode

                -

                You can play with other players online in real-time, either in 1-on-1 matches or in tournaments. You can also join clubs and chat with other members. You can earn coins and cash by winning games and use them to enter higher stakes matches or buy new items.

                -

                Different balls and table types

                -

                You can choose from different balls and table types to change the look and feel of the game. You can also unlock new balls and tables by completing missions or winning tournaments. Some of the balls and tables have special effects, such as fire, ice, or neon.

                -

                Customizable cues and avatars

                -

                You can customize your cue and avatar to show your personality and style. You can buy new cues and avatars with coins or cash, or win them as prizes. Some of the cues and avatars have special abilities, such as increased power, accuracy, or spin.

                -

                Leaderboards and achievements

                -

                You can track your progress and performance by checking the leaderboards and achievements. You can see how you rank among your friends, club members, or global players. You can also earn trophies and badges by completing challenges and milestones.

                -

                -

                Chat and emojis

                -

                You can communicate with your opponents or friends by using chat messages or emojis. You can send compliments, taunts, or jokes to spice up the game. You can also use emojis to express your emotions or reactions.

                -

                How to Download and Install 8 Ball Pool Shooter APK?

                -

                If you want to play 8 Ball Pool Shooter APK on your phone, you need to download and install it first. Here are the steps you need to follow:

                -

                Step 1: Enable unknown sources

                -

                Since 8 Ball Pool Shooter APK is not available on the Google Play Store, you need to enable unknown sources on your phone. This will allow you to install apps from sources other than the official store. To do this, go to your phone settings, then security, then toggle on the unknown sources option.

                -

                Step 2: Download the APK file

                -

                Next, you need to download the APK file of 8 Ball Pool Shooter APK. You can find it on various websites that offer APK downloads, such as APKPure, APKMirror, or APKMonk. Make sure you download the latest version of the app and check the file size and permissions before downloading.

                -

                Step 3: Install the APK file

                -

                Once you have downloaded the APK file, you need to install it on your phone. To do this, locate the file in your downloads folder or notification bar and tap on it. You will see a prompt asking you to confirm the installation. Tap on install and wait for the process to finish.

                -

                Step 4: Launch the game and enjoy

                -

                After the installation is complete, you can launch the game and start playing. You will see a welcome screen where you can sign in with your Facebook account or play as a guest. You can also choose your language and region. Then, you can access the main menu where you can select your game mode, customize your profile, or join a club.

                -

                Pros and Cons of 8 Ball Pool Shooter APK

                -

                Like any other app, 8 Ball Pool Shooter APK has its pros and cons. Here are some of them:

                -

                Pros

                -
                  -
                • Free and easy to play: You don't need to pay anything to download or play 8 Ball Pool Shooter APK. You also don't need any special skills or equipment to play pool on your phone. You just need to swipe your finger to aim and shoot.
                • -
                • Realistic physics and graphics: 8 Ball Pool Shooter APK has realistic physics and graphics that make the game more enjoyable and immersive. You can see the balls roll, bounce, and spin as they would on a real table. You can also see the details of the cues, tables, and backgrounds.
                • -
                • Fun and challenging gameplay: 8 Ball Pool Shooter APK has fun and challenging gameplay that keeps you hooked and entertained. You can play with different rules, modes, and levels of difficulty. You can also face different opponents with different skills and styles.
                • -
                • Social and interactive features: 8 Ball Pool Shooter APK has social and interactive features that make the game more lively and engaging. You can chat with your opponents or friends, send emojis, join clubs, or compete in tournaments. You can also share your achievements or challenges on social media.
                • -
                -

                Cons

                -
                  -
                • Requires internet connection: You need to have a stable internet connection to play 8 Ball Pool Shooter APK online. If your connection is slow or unstable, you may experience lagging, freezing, or disconnecting issues. This can affect your gameplay and enjoyment.
                • -
                • May contain ads and in-app purchases: 8 Ball Pool Shooter APK may contain ads and in-app purchases that can be annoying or tempting. Ads may pop up during or after the game, which can interrupt your flow or mood. In-app purchases may offer you items or features that can enhance your game, but they may also cost you real money.
                • -
                • May not be compatible with some devices: 8 Ball Pool Shooter APK may not be compatible with some devices due to their specifications or operating systems. Some devices may not support the app or run it smoothly. Some devices may also have issues with the screen size or orientation.
                • -
                -

                Conclusion

                -

                8 Ball Pool Shooter APK is a fun and easy way to play pool on your phone. It has many features that make it realistic, challenging, and social. It is free and easy to download and install, but it also has some drawbacks that you should be aware of. If you love playing pool but don't have access to a real table, you should give 8 Ball Pool Shooter APK a try.

                -

                Frequently Asked Questions (FAQs)

                -
                  -
                1. What is the difference between 8 ball and 9 ball mode?
                2. -

                  8 ball and 9 ball are two different types of pool games. In 8 ball mode, you have to pot all your balls (either solids or stripes) and then the 8 ball to win. In 9 ball mode, you have to pot the balls in numerical order, from 1 to 9, to win.

                  -
                3. How can I get more coins and cash in 8 Ball Pool Shooter APK?
                4. -

                  You can get more coins and cash by winning games, completing missions, or participating in tournaments. You can also get free coins and cash by watching ads, spinning the wheel, or opening chests. Alternatively, you can buy coins and cash with real money through in-app purchases.

                  -
                5. How can I join or create a club in 8 Ball Pool Shooter APK?
                6. -

                  You can join or create a club by tapping on the club icon on the main menu. You can search for existing clubs by name, region, or level. You can also create your own club by choosing a name, logo, and description. You can invite your friends or other players to join your club.

                  -
                7. How can I change my cue, avatar, or table in 8 Ball Pool Shooter APK?
                8. -

                  You can change your cue, avatar, or table by tapping on the profile icon on the main menu. You can see your collection of cues, avatars, and tables that you have unlocked or bought. You can select the ones you want to use and apply them to your game.

                  -
                9. How can I report a bug or a problem in 8 Ball Pool Shooter APK?
                10. -

                  You can report a bug or a problem in 8 Ball Pool Shooter APK by tapping on the settings icon on the main menu. You can see the option to contact us, where you can send an email to the developers. You can describe your issue and attach a screenshot if possible.

                  -

                -
                -
                \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Candy Crush Soda Saga MOD APK How to Play the Old Version with No Limits.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Candy Crush Soda Saga MOD APK How to Play the Old Version with No Limits.md deleted file mode 100644 index e9e2b8c654943bcdacaaaa5555a2f88d49881add..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Candy Crush Soda Saga MOD APK How to Play the Old Version with No Limits.md +++ /dev/null @@ -1,131 +0,0 @@ -
                -

                Candy Crush Soda Saga Old Version Mod APK: How to Download and Play

                -

                Do you love playing Candy Crush Soda Saga, but wish you could have more lives, moves, boosters, and coins? Do you want to experience the game as it was before the updates and changes? If so, you might be interested in downloading and installing the old version mod apk of Candy Crush Soda Saga. In this article, we will tell you everything you need to know about this mod apk, including what it is, why you should use it, how to get it, and how to play with it. Let's get started!

                -

                What is Candy Crush Soda Saga?

                -

                A popular match-three puzzle game with soda-themed levels

                -

                Candy Crush Soda Saga is one of the most popular games in the world, with over 100 million downloads on Google Play. It is a match-three puzzle game, where you have to swap and match candies of the same color to clear them from the board. The game has hundreds of levels, each with a different goal and a unique soda-themed design. Some levels have soda bottles that you have to pop to fill the board with soda, some have honey that you have to free the bears from, some have bubble gum that you have to spread, and some have chocolate that you have to break. The game is fun, colorful, and addictive, and it will keep you entertained for hours.

                -

                candy crush soda saga old version mod apk


                DOWNLOADhttps://bltlly.com/2uOiA2



                -

                The features and benefits of playing Candy Crush Soda Saga

                -

                Some of the features and benefits of playing Candy Crush Soda Saga are:

                -
                • You can explore a sweet and fizzy world full of candy characters, such as Kimmy, Tiffi, Mr. Toffee, Yeti, and more.
                • You can enjoy various game modes, such as soda, frosting, bubble, honeycomb, jam, and more.
                • You can unlock and use powerful boosters, such as striped candy, wrapped candy, color bomb, fish candy, hammer, switch, bomb, lollipop, and more.
                • You can compete with your friends and other players around the world on the leaderboards and in events.
                • You can sync your game progress across multiple devices using Facebook or a King account.
                • You can play offline without an internet connection.
                -

                Why download the old version mod apk of Candy Crush Soda Saga?

                -

                The advantages of using the mod apk over the official version

                -

                The old version mod apk of Candy Crush Soda Saga is a modified version of the game that has some extra features and advantages over the official version. Some of these are:

                -
                • You can access all the levels and episodes that were available in the old version of the game.
                • You can have unlimited lives, moves, boosters, and coins to play the game without any limitations or interruptions.
                • You can use a mod menu to enable or disable various cheats, such as auto win, score multiplier, infinite time, and more.
                • You can enjoy the game without any ads or pop-ups.
                -

                The disadvantages and risks of using the mod apk

                -

                However, using the old version mod apk of Candy Crush Soda Saga also has some disadvantages and risks that you should be aware of. Some of these are:

                -
                • You may not be able to access the latest features, updates, and events that are available in the official version of the game.
                • You may encounter some bugs, errors, or crashes while playing the game with the mod apk.
                • You may violate the terms and conditions of the game and risk getting banned or suspended from playing.
                • You may expose your device to malware, viruses, or other harmful software that may damage your data or privacy.
                -

                How to download and install the old version mod apk of Candy Crush Soda Saga?

                -

                The steps to download the mod apk from a reliable source

                -

                If you decide to download and install the old version mod apk of Candy Crush Soda Saga, you need to follow these steps carefully:

                -
                1. Find a reliable and trustworthy source that offers the mod apk file for download. You can search online for reviews, ratings, and feedback from other users who have used the mod apk before.
                2. Click on the download link or button and wait for the mod apk file to be downloaded on your device. The file size may vary depending on the version and features of the mod apk.
                3. Before installing the mod apk file, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
                -

                The steps to install the mod apk on your Android device

                -

                After downloading the mod apk file, you need to follow these steps to install it on your Android device:

                -
                1. Locate the mod apk file on your device using a file manager app or your browser's downloads folder.
                2. Tap on the mod apk file and follow the instructions on the screen to install it. You may need to grant some permissions or accept some terms and conditions during the installation process.
                3. Once the installation is complete, you can launch the game from your app drawer or home screen. You may need to allow some additional permissions or settings for the game to run properly.
                -
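                If you would rather sideload from a computer than tap through the on-device installer, the same installation can be scripted. The sketch below is a minimal example, not the app's official tooling: it assumes the Android platform tools (adb) are on your PATH, USB debugging is enabled on the device, and the APK file name is a placeholder.

```python
import subprocess

def install_apk(apk_path: str) -> None:
    """Sideload an APK onto a USB-connected Android device via adb."""
    # `adb install` pushes the file and installs it in one step; it prints
    # "Success" or a descriptive failure reason (bad signature, downgrade, etc.).
    result = subprocess.run(
        ["adb", "install", apk_path],
        capture_output=True, text=True, check=False,
    )
    print(result.stdout or result.stderr)

install_apk("candy-crush-soda-old.apk")  # placeholder file name
```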

                How to play Candy Crush Soda Saga with the mod apk?

                -

                The features and functions of the mod menu

                -

                When you play Candy Crush Soda Saga with the mod apk, you will notice a mod menu icon on the top left corner of the screen. This is where you can access and customize various cheats and hacks for the game. Some of the features and functions of the mod menu are:

                -
                • You can enable or disable unlimited lives, moves, boosters, and coins by tapping on their respective buttons.
                • You can adjust the score multiplier by sliding the bar left or right. The higher the multiplier, the more points you will get for each match.
                • You can activate auto win by tapping on its button. This will make you win any level instantly without playing.
                • You can set infinite time by tapping on its button. This will make you play any level without worrying about running out of time.
                • You can hide or show the mod menu by tapping on its button. This will make it more discreet or convenient for you to use.
                -

                The tips and tricks to enjoy the game with the mod apk

                -

                Playing Candy Crush Soda Saga with the mod apk can be fun and easy, but you should also follow some tips and tricks to enjoy the game more. Some of these are:

                -


                -
                • Use the mod apk wisely and moderately. Don't abuse the cheats and hacks too much, as they may ruin the challenge and fun of the game. Also, don't use them in online modes or events, as they may get you detected and banned by the game developers.
                • Try to play the old version mod apk of Candy Crush Soda Saga with friends who also have the mod apk. This way, you can share the same features and benefits, and have more fun together.
                • Explore the different levels and episodes that are available in the old version of the game. You may find some levels that are more interesting, challenging, or rewarding than the ones in the new version.
                • Backup your game data before using the mod apk (see the sketch after this list). This way, you can restore your game progress if something goes wrong with the mod apk or if you want to switch back to the official version.
                -
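                For the backup tip above, one scripted route is adb's backup command. This is a hedged sketch, not the game's official backup mechanism: it assumes adb and USB debugging are set up, and that your Android version still supports adb backup (it is deprecated on recent releases). The package name is assumed and should be verified on your own device.

```python
import subprocess

# Minimal sketch: save the app's APK and data to a local .ab archive.
# `adb backup` is deprecated on Android 12+ and may back up nothing there;
# confirm the prompt on the device screen when it appears.
PACKAGE = "com.king.candycrushsodasaga"  # assumed package name - verify with `adb shell pm list packages`

subprocess.run(
    ["adb", "backup", "-f", "candy_backup.ab", "-apk", PACKAGE],
    check=True,
)
# Restore later with: adb restore candy_backup.ab
```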

                Conclusion

                -

                Candy Crush Soda Saga is a great game that can keep you entertained for hours. However, if you want to experience the game as it was before the updates and changes, or if you want to have more lives, moves, boosters, and coins, you can try downloading and installing the old version mod apk of Candy Crush Soda Saga. This mod apk will give you access to all the levels and episodes that were available in the old version, as well as some cheats and hacks that will make the game easier and more fun.

                However, you should also be careful of the disadvantages and risks of using the mod apk, such as missing out on the latest features, encountering bugs, violating the terms and conditions, or exposing your device to malware. Therefore, you should use the mod apk wisely and moderately, and follow the steps and tips that we have provided in this article. We hope that this article has helped you learn more about Candy Crush Soda Saga old version mod apk, how to download and play it, and what to expect from it. If you have any questions or comments, feel free to leave them below. Happy gaming!

                -

                FAQs

                -

                Q1: Is the old version mod apk of Candy Crush Soda Saga safe to use?

                -

                A1: The old version mod apk of Candy Crush Soda Saga is not officially endorsed or supported by the game developers, so it may not be safe to use. It may contain malware, viruses, or other harmful software that may damage your device or data. It may also violate the terms and conditions of the game and get you banned or suspended from playing. Therefore, you should use it at your own risk and discretion.

                -

                Q2: What are the differences between the old version and the new version of Candy Crush Soda Saga?

                -

                A2: The old version and the new version of Candy Crush Soda Saga have some differences in terms of levels, episodes, features, graphics, and performance. The old version has fewer levels and episodes than the new version, but some of them may be more challenging or rewarding. The old version also has some features that are not available in the new version, such as soda bottles that fill up the board with soda. The old version may have lower graphics quality and performance than the new version, but it may also run smoother on older devices.

                -

                Q3: Can I play Candy Crush Soda Saga with the mod apk online or offline?

                -

                A3: You can play Candy Crush Soda Saga with the mod apk both online and offline, depending on your preference and internet connection. However, you should be careful when playing online, as you may get detected and banned by the game developers for using the mod apk. You should also avoid using the cheats and hacks in online modes or events, as they may give you an unfair advantage over other players or ruin the game balance. Playing offline may be safer and more convenient, as you can enjoy the game without any interruptions or restrictions.

                -

                Q4: How can I update the old version mod apk of Candy Crush Soda Saga?

                -

                A4: The old version mod apk of Candy Crush Soda Saga may not be compatible with the latest updates and features of the official version of the game. Therefore, you may need to update the mod apk regularly to keep up with the changes and improvements of the game. To update the mod apk, you can follow these steps:

                -
                1. Check if there is a newer version of the mod apk available from the same source that you downloaded it from. You can also search online for other sources that offer updated versions of the mod apk.
                2. Download the updated version of the mod apk file on your device.
                3. Uninstall the old version of the mod apk from your device.
                4. Install the updated version of the mod apk on your device following the same steps that we have provided above.
                5. Launch the game and enjoy the new features and functions of the mod apk.
                -

                Q5: Where can I find more information about Candy Crush Soda Saga and its mod apk?

                -

                A5: If you want to learn more about Candy Crush Soda Saga and its mod apk, you can visit some of these websites and resources:

                -
                • The official website of Candy Crush Soda Saga: [https://candycrushsodasaga.com/]
                • The official Facebook page of Candy Crush Soda Saga: [https://www.facebook.com/CandyCrushSodaSaga/]
                • The official YouTube channel of Candy Crush Soda Saga: [https://www.youtube.com/user/CandyCrushOfficial]
                • A reliable source for downloading the old version mod apk of Candy Crush Soda Saga: [https://android-1.com/en/186-candy-crush-soda-saga-mod.html]
                • A helpful guide for using the old version mod apk of Candy Crush Soda Saga: [https://www.youtube.com/watch?v=ZwZw0xO7y0A]

                -
                -
                \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Google Play Apps as APK Files - The Best Tools and Methods.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Google Play Apps as APK Files - The Best Tools and Methods.md deleted file mode 100644 index 33f5dfb417fbe3a59c8f54c3b1f58520f0d4f5c2..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Google Play Apps as APK Files - The Best Tools and Methods.md +++ /dev/null @@ -1,134 +0,0 @@ - -

                How to Download APK Files from Google Play

                -

                Do you want to download APK versions of your favorite Google Play Store apps? Although the Play Store doesn't give you the option to download APKs directly, you can use some methods to save and install APK files from Play Store URLs. In this article, we will show you how to download and extract APK files for Google Play apps on your desktop or Android device.

                -

                What is an APK File and Why You Might Need It

                -

                An APK file is an Android application package that contains all the files and resources needed to run an app on your device

                -

                An APK file is like a ZIP file that contains all the components of an Android app, such as code, images, sounds, etc. When you install an app from the Play Store, you are actually downloading and installing an APK file to your device. However, you can't access or view the APK file directly.

                -

                download apk from google play


                DOWNLOADhttps://bltlly.com/2uOnOD



                -

                You might need to download an APK file if you want to install an app that is not available in your region, update an app before it is officially released, or backup an app that you have installed

                -

                There are some reasons why you might want to download an APK file instead of installing an app from the Play Store. For example:

                -
                • You want to install an app that is not available in your country or region due to geo-restrictions or licensing issues.
                • You want to update an app before it is officially released in your area or try out beta versions of new features.
                • You want to backup an app that you have installed from the Play Store in case you need to restore it later or share it with someone else.
                -

                In these cases, you can download the APK file of the app and install it manually on your device or another device. However, you should be careful about the source of the APK file and only download it from trusted websites or apps.

                -

                How to Download APK Files from Google Play on Desktop

                -

                Use a web tool to download APK files by pasting Google Play Store URLs

                -

                One of the easiest ways to download APK files from Google Play is to use a web tool that can generate download links for any Play Store URL. Here are the steps to follow:

                -
                1. Go to play.google.com and find the app or game you want to download.
                2. Copy the URL from the address bar of your browser.
                3. Go to apkcombo.com/downloader/ or another APK downloader website.
                4. Paste the URL in the top text box and select a device type.
                5. Click the Generate Download Link button and then the Click here to download button.
                -

                You will get a ZIP file that contains the APK file and some additional files. You can extract the ZIP file and use the APK file as you wish.
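                Unpacking that bundle is easy to script as well. Here is a minimal sketch using only the Python standard library; it assumes the download is an ordinary ZIP archive (XAPK bundles are ZIPs too), and the file name is a placeholder.

```python
import zipfile
from pathlib import Path

# Unpack the downloaded bundle and list any .apk files inside it.
bundle = Path("com.example.app.zip")  # placeholder file name
out_dir = Path("extracted")

with zipfile.ZipFile(bundle) as zf:
    zf.extractall(out_dir)

for apk in sorted(out_dir.rglob("*.apk")):
    print(apk)
```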

                -

                Use a Chrome or Firefox extension to download APK files directly from Google Play Store

                -

                If you don't want to use a web tool, you can also use a browser extension that can download APK files directly from the Play Store. Here are the steps to follow:

                -
                1. Install APK Downloader for Google Play extension for Chrome or Firefox.
                2. Go to play.google.com and find the app or game you want to download.
                3. Click on the extension icon and select Download APK.
                4. Choose a location to save the APK file on your computer.
                -

                You will get an APK file that you can use as you wish.

                -

                -

                How to Download APK Files from Google Play on Android

                -

                Use an APK extractor app to get the APK files from apps and games you already installed from Google Play Store

                -

                If you want to get the APK files of the apps and games that you already have on your Android device, you can use an APK extractor app that can scan and save the APK files from your installed apps. Here are the steps to follow:

                -
                1. Download App APK Extractor & Analyzer from Google Play Store.
                2. Open the app and select the app or game you want to get the APK for.
                3. Tap Extract App from the pop-up menu and choose a location to save the APK file on your device.
                -

                You will get an APK file that you can use as you wish.
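                If you have a computer handy, the same extraction can be done over adb without installing anything on the phone. A minimal sketch, assuming adb is installed and USB debugging is enabled; the package name is a placeholder you would look up yourself (for example with `adb shell pm list packages`).

```python
import subprocess

def pull_installed_apk(package: str, dest: str = ".") -> None:
    """Copy the base APK of an installed app from a USB-connected device."""
    # `pm path` prints lines like "package:/data/app/<app>/base.apk".
    out = subprocess.run(
        ["adb", "shell", "pm", "path", package],
        capture_output=True, text=True, check=True,
    ).stdout
    for line in out.splitlines():
        if line.startswith("package:") and line.strip().endswith("base.apk"):
            remote = line[len("package:"):].strip()
            subprocess.run(["adb", "pull", remote, dest], check=True)

pull_installed_apk("com.example.app")  # placeholder package name
```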

                -

                How to Install APK Files on Android

                -

                Enable unknown sources on your device settings

                -

                Before you can install an APK file on your Android device, you need to enable unknown sources in your device settings. This will allow you to install apps from sources other than the Play Store. Here are the steps to follow:

                -
                1. Go to Settings > Security > Unknown sources and toggle it on.
                2. You might see a warning message that installing apps from unknown sources can harm your device. Tap OK to proceed.
                -
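                On older Android versions (roughly 7.x and below) this switch is also exposed as a secure setting, so it can be flipped over adb. The sketch below is only a hedged illustration of that legacy behaviour; Android 8.0 and later grant the "install unknown apps" permission per app, so it has no effect there.

```python
import subprocess

# Legacy-only sketch: on Android 7.x and earlier the global toggle maps to
# the secure setting below (1 = allow, 0 = block). Assumes adb and USB
# debugging are set up.
subprocess.run(
    ["adb", "shell", "settings", "put", "secure", "install_non_market_apps", "1"],
    check=True,
)
```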

                Locate the APK file on your device or computer and transfer it if needed

                -

                If you downloaded the APK file on your device, you can use a file manager app to find it in your downloads folder or wherever you saved it. If you downloaded it on your computer, you need to transfer it to your device using a USB cable or a wireless method such as Bluetooth or Wi-Fi Direct.

                -

                Tap on the APK file and follow the installation prompts

                -

                Once you have the APK file on your device, you can tap on it to start the installation process. You might see some permissions requests that ask you to allow the app to access certain features or data on your device. Tap Install when asked and wait for the installation to complete. You can then open and use the app as usual.

                -

                Conclusion

                -

                In this article, we showed you how to download and install APK files from Google Play on your desktop or Android device. You can use these methods to get apps that are not available in your region, update them before they are officially released, or backup them for future use. However, you should always be careful about the source and the security of the APK files you download and install. We hope you found this article helpful and learned something new. If you have any questions or feedback, please let us know in the comments below.

                -

                FAQs

                -

                What are the benefits of downloading APK files from Google Play?

                -

                Some of the benefits of downloading APK files from Google Play are:

                -
                • You can access apps that are not available in your region or country due to geo-restrictions or licensing issues.
                • You can update apps before they are officially released in your area or try out beta versions of new features.
                • You can backup apps that you have installed from the Play Store in case you need to restore them later or share them with someone else.
                -

                What are the risks of downloading APK files from Google Play?

                -

                Some of the risks of downloading APK files from Google Play are:

                -
                • You might download malicious or fake APK files that can harm your device or steal your data.
                • You might violate the terms and conditions of the Play Store or the app developer by downloading and installing APK files.
                • You might not get the latest updates or support from the Play Store or the app developer by installing APK files.
                -

                How can I check if an APK file is safe to download and install?

                -

                There are some ways to check if an APK file is safe to download and install, such as:

                -
                • Use a reputable website or app to download APK files, such as apkcombo.com, apkpure.com, or App APK Extractor & Analyzer.
                • Scan the APK file with an antivirus or malware scanner app before installing it on your device; you can also compare its checksum against one published by the source (see the sketch after this list).
                • Check the permissions and reviews of the app before installing it on your device.
                -
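                The checksum comparison is simple to script with the standard library. A minimal sketch: both the file name and the expected hash are placeholders, and the reference hash must come from a source you already trust.

```python
import hashlib

def sha256_of(path: str) -> str:
    """Return the SHA-256 hex digest of a file, read in 1 MiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "<published sha256 here>"  # placeholder: paste the published hash
actual = sha256_of("downloaded.apk")  # placeholder file name
print("OK" if actual == expected else f"Mismatch: {actual}")
```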

                How can I uninstall an APK file from my device?

                -

                To uninstall an APK file from your device, you can follow these steps:

                -
                1. Go to Settings > Apps & notifications > See all apps.
                2. Find and tap on the app you want to uninstall.
                3. Tap Uninstall and confirm your action.
                -

                How can I update an APK file on my device?

                -

                To update an APK file on your device, you can follow these steps:

                -
                1. Download the latest version of the APK file from a trusted source.
                2. Tap on the APK file and follow the installation prompts.
                3. The new version will overwrite the old version and update the app on your device.
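                If you sideload from a computer, the same update can be done in one step with adb's reinstall flag. A minimal sketch, assuming adb and USB debugging are set up; the file name is a placeholder.

```python
import subprocess

# `adb install -r` replaces the installed APK while keeping the app's data,
# which is effectively what an in-place update does.
subprocess.run(["adb", "install", "-r", "app-new-version.apk"], check=True)
```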

                -
                -
                \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Future Point Leo Star Professional Cracked Full Download WORK.md b/spaces/tioseFevbu/cartoon-converter/scripts/Future Point Leo Star Professional Cracked Full Download WORK.md deleted file mode 100644 index 71c122ca13d2774f94db8399a04b800b659acf44..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Future Point Leo Star Professional Cracked Full Download WORK.md +++ /dev/null @@ -1,20 +0,0 @@ - -

                How to Download Future Point Leo Star Professional for Free

                -

                Future Point Leo Star Professional is a popular astrological software that contains complete astrological calculations with remedies, predictions, various charts, dashas and more. It is used by many professional astrologers and enthusiasts who want to get accurate and detailed insights into their horoscopes.

                -

                future point leo star professional cracked full download


                Download Ziphttps://urlcod.com/2uHvjg



                -

                However, Future Point Leo Star Professional is not a free software. It requires a license key to activate and use its full features. The official website of Future Point offers different packages and prices for the software, ranging from $99 to $999 depending on the edition and modules you choose.

                -

                But what if you want to download Future Point Leo Star Professional for free? Is there a way to get a cracked version of the software without paying anything? The answer is yes, but it comes with some risks and drawbacks.

                -

                The Risks of Downloading Cracked Software

                -

                Downloading cracked software is illegal and unethical. It violates the intellectual property rights of the software developers and distributors. It also exposes your computer to malware, viruses, spyware and other harmful programs that can damage your system or steal your personal information.

                -

                -

                Moreover, downloading cracked software can compromise the quality and reliability of the software itself. You may encounter errors, bugs, crashes or missing features that can affect your astrological calculations and predictions. You may also miss out on the latest updates, patches and support from the official website.

                -

                Therefore, downloading cracked software is not worth the risk: you may end up paying far more to repair or replace your computer, and you may lose your credibility and reputation as an astrologer or as a user of astrology software.

                -

                The Alternative Way to Download Future Point Leo Star Professional for Free

                -

                Fortunately, there is a legal and safe way to download Future Point Leo Star Professional for free. You can use the trial version of the software that is available on the official website of Future Point. The trial version allows you to use the software for 30 days without any limitations or restrictions.

                -

                The trial version is a great way to test the software and see if it meets your needs and expectations. You can explore all the features and functions of the software and compare it with other astrological software in the market. You can also generate reports, charts and predictions for yourself or your clients.

                -

                If you like the trial version and want to continue using it after 30 days, you can purchase a license key from the official website of Future Point. The license key will activate the full version of the software and enable you to use it for lifetime. You will also get access to the latest updates, patches and support from Future Point.

                -

                Conclusion

                -

                Future Point Leo Star Professional is a powerful and comprehensive astrological software that can help you with your horoscope analysis and prediction. However, downloading a cracked version of the software is illegal, unethical and risky. It can harm your computer and your reputation as an astrologer or a user of astrology software.

                -

                The best way to download Future Point Leo Star Professional for free is to use the trial version of the software that is available on the official website of Future Point. The trial version gives you 30 days to try out the software without any limitations or restrictions. If you like it, you can buy a license key from Future Point and enjoy the full version of the software for lifetime.

                -
                -
                \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/_cell_widths.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/_cell_widths.py deleted file mode 100644 index 36286df379e28ea997bea3ee1fd62cadebebbba9..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/_cell_widths.py +++ /dev/null @@ -1,451 +0,0 @@ -# Auto generated by make_terminal_widths.py - -CELL_WIDTHS = [ - (0, 0, 0), - (1, 31, -1), - (127, 159, -1), - (768, 879, 0), - (1155, 1161, 0), - (1425, 1469, 0), - (1471, 1471, 0), - (1473, 1474, 0), - (1476, 1477, 0), - (1479, 1479, 0), - (1552, 1562, 0), - (1611, 1631, 0), - (1648, 1648, 0), - (1750, 1756, 0), - (1759, 1764, 0), - (1767, 1768, 0), - (1770, 1773, 0), - (1809, 1809, 0), - (1840, 1866, 0), - (1958, 1968, 0), - (2027, 2035, 0), - (2045, 2045, 0), - (2070, 2073, 0), - (2075, 2083, 0), - (2085, 2087, 0), - (2089, 2093, 0), - (2137, 2139, 0), - (2259, 2273, 0), - (2275, 2306, 0), - (2362, 2362, 0), - (2364, 2364, 0), - (2369, 2376, 0), - (2381, 2381, 0), - (2385, 2391, 0), - (2402, 2403, 0), - (2433, 2433, 0), - (2492, 2492, 0), - (2497, 2500, 0), - (2509, 2509, 0), - (2530, 2531, 0), - (2558, 2558, 0), - (2561, 2562, 0), - (2620, 2620, 0), - (2625, 2626, 0), - (2631, 2632, 0), - (2635, 2637, 0), - (2641, 2641, 0), - (2672, 2673, 0), - (2677, 2677, 0), - (2689, 2690, 0), - (2748, 2748, 0), - (2753, 2757, 0), - (2759, 2760, 0), - (2765, 2765, 0), - (2786, 2787, 0), - (2810, 2815, 0), - (2817, 2817, 0), - (2876, 2876, 0), - (2879, 2879, 0), - (2881, 2884, 0), - (2893, 2893, 0), - (2901, 2902, 0), - (2914, 2915, 0), - (2946, 2946, 0), - (3008, 3008, 0), - (3021, 3021, 0), - (3072, 3072, 0), - (3076, 3076, 0), - (3134, 3136, 0), - (3142, 3144, 0), - (3146, 3149, 0), - (3157, 3158, 0), - (3170, 3171, 0), - (3201, 3201, 0), - (3260, 3260, 0), - (3263, 3263, 0), - (3270, 3270, 0), - (3276, 3277, 0), - (3298, 3299, 0), - (3328, 3329, 0), - (3387, 3388, 0), - (3393, 3396, 0), - (3405, 3405, 0), - (3426, 3427, 0), - (3457, 3457, 0), - (3530, 3530, 0), - (3538, 3540, 0), - (3542, 3542, 0), - (3633, 3633, 0), - (3636, 3642, 0), - (3655, 3662, 0), - (3761, 3761, 0), - (3764, 3772, 0), - (3784, 3789, 0), - (3864, 3865, 0), - (3893, 3893, 0), - (3895, 3895, 0), - (3897, 3897, 0), - (3953, 3966, 0), - (3968, 3972, 0), - (3974, 3975, 0), - (3981, 3991, 0), - (3993, 4028, 0), - (4038, 4038, 0), - (4141, 4144, 0), - (4146, 4151, 0), - (4153, 4154, 0), - (4157, 4158, 0), - (4184, 4185, 0), - (4190, 4192, 0), - (4209, 4212, 0), - (4226, 4226, 0), - (4229, 4230, 0), - (4237, 4237, 0), - (4253, 4253, 0), - (4352, 4447, 2), - (4957, 4959, 0), - (5906, 5908, 0), - (5938, 5940, 0), - (5970, 5971, 0), - (6002, 6003, 0), - (6068, 6069, 0), - (6071, 6077, 0), - (6086, 6086, 0), - (6089, 6099, 0), - (6109, 6109, 0), - (6155, 6157, 0), - (6277, 6278, 0), - (6313, 6313, 0), - (6432, 6434, 0), - (6439, 6440, 0), - (6450, 6450, 0), - (6457, 6459, 0), - (6679, 6680, 0), - (6683, 6683, 0), - (6742, 6742, 0), - (6744, 6750, 0), - (6752, 6752, 0), - (6754, 6754, 0), - (6757, 6764, 0), - (6771, 6780, 0), - (6783, 6783, 0), - (6832, 6848, 0), - (6912, 6915, 0), - (6964, 6964, 0), - (6966, 6970, 0), - (6972, 6972, 0), - (6978, 6978, 0), - (7019, 7027, 0), - (7040, 7041, 0), - (7074, 7077, 0), - (7080, 7081, 0), - (7083, 7085, 0), - (7142, 7142, 0), - (7144, 7145, 0), - (7149, 7149, 0), - (7151, 7153, 0), 
- (7212, 7219, 0), - (7222, 7223, 0), - (7376, 7378, 0), - (7380, 7392, 0), - (7394, 7400, 0), - (7405, 7405, 0), - (7412, 7412, 0), - (7416, 7417, 0), - (7616, 7673, 0), - (7675, 7679, 0), - (8203, 8207, 0), - (8232, 8238, 0), - (8288, 8291, 0), - (8400, 8432, 0), - (8986, 8987, 2), - (9001, 9002, 2), - (9193, 9196, 2), - (9200, 9200, 2), - (9203, 9203, 2), - (9725, 9726, 2), - (9748, 9749, 2), - (9800, 9811, 2), - (9855, 9855, 2), - (9875, 9875, 2), - (9889, 9889, 2), - (9898, 9899, 2), - (9917, 9918, 2), - (9924, 9925, 2), - (9934, 9934, 2), - (9940, 9940, 2), - (9962, 9962, 2), - (9970, 9971, 2), - (9973, 9973, 2), - (9978, 9978, 2), - (9981, 9981, 2), - (9989, 9989, 2), - (9994, 9995, 2), - (10024, 10024, 2), - (10060, 10060, 2), - (10062, 10062, 2), - (10067, 10069, 2), - (10071, 10071, 2), - (10133, 10135, 2), - (10160, 10160, 2), - (10175, 10175, 2), - (11035, 11036, 2), - (11088, 11088, 2), - (11093, 11093, 2), - (11503, 11505, 0), - (11647, 11647, 0), - (11744, 11775, 0), - (11904, 11929, 2), - (11931, 12019, 2), - (12032, 12245, 2), - (12272, 12283, 2), - (12288, 12329, 2), - (12330, 12333, 0), - (12334, 12350, 2), - (12353, 12438, 2), - (12441, 12442, 0), - (12443, 12543, 2), - (12549, 12591, 2), - (12593, 12686, 2), - (12688, 12771, 2), - (12784, 12830, 2), - (12832, 12871, 2), - (12880, 19903, 2), - (19968, 42124, 2), - (42128, 42182, 2), - (42607, 42610, 0), - (42612, 42621, 0), - (42654, 42655, 0), - (42736, 42737, 0), - (43010, 43010, 0), - (43014, 43014, 0), - (43019, 43019, 0), - (43045, 43046, 0), - (43052, 43052, 0), - (43204, 43205, 0), - (43232, 43249, 0), - (43263, 43263, 0), - (43302, 43309, 0), - (43335, 43345, 0), - (43360, 43388, 2), - (43392, 43394, 0), - (43443, 43443, 0), - (43446, 43449, 0), - (43452, 43453, 0), - (43493, 43493, 0), - (43561, 43566, 0), - (43569, 43570, 0), - (43573, 43574, 0), - (43587, 43587, 0), - (43596, 43596, 0), - (43644, 43644, 0), - (43696, 43696, 0), - (43698, 43700, 0), - (43703, 43704, 0), - (43710, 43711, 0), - (43713, 43713, 0), - (43756, 43757, 0), - (43766, 43766, 0), - (44005, 44005, 0), - (44008, 44008, 0), - (44013, 44013, 0), - (44032, 55203, 2), - (63744, 64255, 2), - (64286, 64286, 0), - (65024, 65039, 0), - (65040, 65049, 2), - (65056, 65071, 0), - (65072, 65106, 2), - (65108, 65126, 2), - (65128, 65131, 2), - (65281, 65376, 2), - (65504, 65510, 2), - (66045, 66045, 0), - (66272, 66272, 0), - (66422, 66426, 0), - (68097, 68099, 0), - (68101, 68102, 0), - (68108, 68111, 0), - (68152, 68154, 0), - (68159, 68159, 0), - (68325, 68326, 0), - (68900, 68903, 0), - (69291, 69292, 0), - (69446, 69456, 0), - (69633, 69633, 0), - (69688, 69702, 0), - (69759, 69761, 0), - (69811, 69814, 0), - (69817, 69818, 0), - (69888, 69890, 0), - (69927, 69931, 0), - (69933, 69940, 0), - (70003, 70003, 0), - (70016, 70017, 0), - (70070, 70078, 0), - (70089, 70092, 0), - (70095, 70095, 0), - (70191, 70193, 0), - (70196, 70196, 0), - (70198, 70199, 0), - (70206, 70206, 0), - (70367, 70367, 0), - (70371, 70378, 0), - (70400, 70401, 0), - (70459, 70460, 0), - (70464, 70464, 0), - (70502, 70508, 0), - (70512, 70516, 0), - (70712, 70719, 0), - (70722, 70724, 0), - (70726, 70726, 0), - (70750, 70750, 0), - (70835, 70840, 0), - (70842, 70842, 0), - (70847, 70848, 0), - (70850, 70851, 0), - (71090, 71093, 0), - (71100, 71101, 0), - (71103, 71104, 0), - (71132, 71133, 0), - (71219, 71226, 0), - (71229, 71229, 0), - (71231, 71232, 0), - (71339, 71339, 0), - (71341, 71341, 0), - (71344, 71349, 0), - (71351, 71351, 0), - (71453, 71455, 0), - (71458, 71461, 
0), - (71463, 71467, 0), - (71727, 71735, 0), - (71737, 71738, 0), - (71995, 71996, 0), - (71998, 71998, 0), - (72003, 72003, 0), - (72148, 72151, 0), - (72154, 72155, 0), - (72160, 72160, 0), - (72193, 72202, 0), - (72243, 72248, 0), - (72251, 72254, 0), - (72263, 72263, 0), - (72273, 72278, 0), - (72281, 72283, 0), - (72330, 72342, 0), - (72344, 72345, 0), - (72752, 72758, 0), - (72760, 72765, 0), - (72767, 72767, 0), - (72850, 72871, 0), - (72874, 72880, 0), - (72882, 72883, 0), - (72885, 72886, 0), - (73009, 73014, 0), - (73018, 73018, 0), - (73020, 73021, 0), - (73023, 73029, 0), - (73031, 73031, 0), - (73104, 73105, 0), - (73109, 73109, 0), - (73111, 73111, 0), - (73459, 73460, 0), - (92912, 92916, 0), - (92976, 92982, 0), - (94031, 94031, 0), - (94095, 94098, 0), - (94176, 94179, 2), - (94180, 94180, 0), - (94192, 94193, 2), - (94208, 100343, 2), - (100352, 101589, 2), - (101632, 101640, 2), - (110592, 110878, 2), - (110928, 110930, 2), - (110948, 110951, 2), - (110960, 111355, 2), - (113821, 113822, 0), - (119143, 119145, 0), - (119163, 119170, 0), - (119173, 119179, 0), - (119210, 119213, 0), - (119362, 119364, 0), - (121344, 121398, 0), - (121403, 121452, 0), - (121461, 121461, 0), - (121476, 121476, 0), - (121499, 121503, 0), - (121505, 121519, 0), - (122880, 122886, 0), - (122888, 122904, 0), - (122907, 122913, 0), - (122915, 122916, 0), - (122918, 122922, 0), - (123184, 123190, 0), - (123628, 123631, 0), - (125136, 125142, 0), - (125252, 125258, 0), - (126980, 126980, 2), - (127183, 127183, 2), - (127374, 127374, 2), - (127377, 127386, 2), - (127488, 127490, 2), - (127504, 127547, 2), - (127552, 127560, 2), - (127568, 127569, 2), - (127584, 127589, 2), - (127744, 127776, 2), - (127789, 127797, 2), - (127799, 127868, 2), - (127870, 127891, 2), - (127904, 127946, 2), - (127951, 127955, 2), - (127968, 127984, 2), - (127988, 127988, 2), - (127992, 128062, 2), - (128064, 128064, 2), - (128066, 128252, 2), - (128255, 128317, 2), - (128331, 128334, 2), - (128336, 128359, 2), - (128378, 128378, 2), - (128405, 128406, 2), - (128420, 128420, 2), - (128507, 128591, 2), - (128640, 128709, 2), - (128716, 128716, 2), - (128720, 128722, 2), - (128725, 128727, 2), - (128747, 128748, 2), - (128756, 128764, 2), - (128992, 129003, 2), - (129292, 129338, 2), - (129340, 129349, 2), - (129351, 129400, 2), - (129402, 129483, 2), - (129485, 129535, 2), - (129648, 129652, 2), - (129656, 129658, 2), - (129664, 129670, 2), - (129680, 129704, 2), - (129712, 129718, 2), - (129728, 129730, 2), - (129744, 129750, 2), - (131072, 196605, 2), - (196608, 262141, 2), - (917760, 917999, 0), -] diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/_win32_console.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/_win32_console.py deleted file mode 100644 index 81b1082905338a74b72b9de432ece50a456687bc..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/_win32_console.py +++ /dev/null @@ -1,662 +0,0 @@ -"""Light wrapper around the Win32 Console API - this module should only be imported on Windows - -The API that this module wraps is documented at https://docs.microsoft.com/en-us/windows/console/console-functions -""" -import ctypes -import sys -from typing import Any - -windll: Any = None -if sys.platform == "win32": - windll = ctypes.LibraryLoader(ctypes.WinDLL) -else: - raise ImportError(f"{__name__} can only be imported on Windows") - -import 
time -from ctypes import Structure, byref, wintypes -from typing import IO, NamedTuple, Type, cast - -from pip._vendor.rich.color import ColorSystem -from pip._vendor.rich.style import Style - -STDOUT = -11 -ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 - -COORD = wintypes._COORD - - -class LegacyWindowsError(Exception): - pass - - -class WindowsCoordinates(NamedTuple): - """Coordinates in the Windows Console API are (y, x), not (x, y). - This class is intended to prevent that confusion. - Rows and columns are indexed from 0. - This class can be used in place of wintypes._COORD in arguments and argtypes. - """ - - row: int - col: int - - @classmethod - def from_param(cls, value: "WindowsCoordinates") -> COORD: - """Converts a WindowsCoordinates into a wintypes _COORD structure. - This classmethod is internally called by ctypes to perform the conversion. - - Args: - value (WindowsCoordinates): The input coordinates to convert. - - Returns: - wintypes._COORD: The converted coordinates struct. - """ - return COORD(value.col, value.row) - - -class CONSOLE_SCREEN_BUFFER_INFO(Structure): - _fields_ = [ - ("dwSize", COORD), - ("dwCursorPosition", COORD), - ("wAttributes", wintypes.WORD), - ("srWindow", wintypes.SMALL_RECT), - ("dwMaximumWindowSize", COORD), - ] - - -class CONSOLE_CURSOR_INFO(ctypes.Structure): - _fields_ = [("dwSize", wintypes.DWORD), ("bVisible", wintypes.BOOL)] - - -_GetStdHandle = windll.kernel32.GetStdHandle -_GetStdHandle.argtypes = [ - wintypes.DWORD, -] -_GetStdHandle.restype = wintypes.HANDLE - - -def GetStdHandle(handle: int = STDOUT) -> wintypes.HANDLE: - """Retrieves a handle to the specified standard device (standard input, standard output, or standard error). - - Args: - handle (int): Integer identifier for the handle. Defaults to -11 (stdout). - - Returns: - wintypes.HANDLE: The handle - """ - return cast(wintypes.HANDLE, _GetStdHandle(handle)) - - -_GetConsoleMode = windll.kernel32.GetConsoleMode -_GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD] -_GetConsoleMode.restype = wintypes.BOOL - - -def GetConsoleMode(std_handle: wintypes.HANDLE) -> int: - """Retrieves the current input mode of a console's input buffer - or the current output mode of a console screen buffer. - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - - Raises: - LegacyWindowsError: If any error occurs while calling the Windows console API. - - Returns: - int: Value representing the current console mode as documented at - https://docs.microsoft.com/en-us/windows/console/getconsolemode#parameters - """ - - console_mode = wintypes.DWORD() - success = bool(_GetConsoleMode(std_handle, console_mode)) - if not success: - raise LegacyWindowsError("Unable to get legacy Windows Console Mode") - return console_mode.value - - -_FillConsoleOutputCharacterW = windll.kernel32.FillConsoleOutputCharacterW -_FillConsoleOutputCharacterW.argtypes = [ - wintypes.HANDLE, - ctypes.c_char, - wintypes.DWORD, - cast(Type[COORD], WindowsCoordinates), - ctypes.POINTER(wintypes.DWORD), -] -_FillConsoleOutputCharacterW.restype = wintypes.BOOL - - -def FillConsoleOutputCharacter( - std_handle: wintypes.HANDLE, - char: str, - length: int, - start: WindowsCoordinates, -) -> int: - """Writes a character to the console screen buffer a specified number of times, beginning at the specified coordinates. - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - char (str): The character to write. 
Must be a string of length 1. - length (int): The number of times to write the character. - start (WindowsCoordinates): The coordinates to start writing at. - - Returns: - int: The number of characters written. - """ - character = ctypes.c_char(char.encode()) - num_characters = wintypes.DWORD(length) - num_written = wintypes.DWORD(0) - _FillConsoleOutputCharacterW( - std_handle, - character, - num_characters, - start, - byref(num_written), - ) - return num_written.value - - -_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute -_FillConsoleOutputAttribute.argtypes = [ - wintypes.HANDLE, - wintypes.WORD, - wintypes.DWORD, - cast(Type[COORD], WindowsCoordinates), - ctypes.POINTER(wintypes.DWORD), -] -_FillConsoleOutputAttribute.restype = wintypes.BOOL - - -def FillConsoleOutputAttribute( - std_handle: wintypes.HANDLE, - attributes: int, - length: int, - start: WindowsCoordinates, -) -> int: - """Sets the character attributes for a specified number of character cells, - beginning at the specified coordinates in a screen buffer. - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - attributes (int): Integer value representing the foreground and background colours of the cells. - length (int): The number of cells to set the output attribute of. - start (WindowsCoordinates): The coordinates of the first cell whose attributes are to be set. - - Returns: - int: The number of cells whose attributes were actually set. - """ - num_cells = wintypes.DWORD(length) - style_attrs = wintypes.WORD(attributes) - num_written = wintypes.DWORD(0) - _FillConsoleOutputAttribute( - std_handle, style_attrs, num_cells, start, byref(num_written) - ) - return num_written.value - - -_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute -_SetConsoleTextAttribute.argtypes = [ - wintypes.HANDLE, - wintypes.WORD, -] -_SetConsoleTextAttribute.restype = wintypes.BOOL - - -def SetConsoleTextAttribute( - std_handle: wintypes.HANDLE, attributes: wintypes.WORD -) -> bool: - """Set the colour attributes for all text written after this function is called. - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - attributes (int): Integer value representing the foreground and background colours. - - - Returns: - bool: True if the attribute was set successfully, otherwise False. - """ - return bool(_SetConsoleTextAttribute(std_handle, attributes)) - - -_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo -_GetConsoleScreenBufferInfo.argtypes = [ - wintypes.HANDLE, - ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO), -] -_GetConsoleScreenBufferInfo.restype = wintypes.BOOL - - -def GetConsoleScreenBufferInfo( - std_handle: wintypes.HANDLE, -) -> CONSOLE_SCREEN_BUFFER_INFO: - """Retrieves information about the specified console screen buffer. - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. 
- - Returns: - CONSOLE_SCREEN_BUFFER_INFO: A CONSOLE_SCREEN_BUFFER_INFO ctype struct contain information about - screen size, cursor position, colour attributes, and more.""" - console_screen_buffer_info = CONSOLE_SCREEN_BUFFER_INFO() - _GetConsoleScreenBufferInfo(std_handle, byref(console_screen_buffer_info)) - return console_screen_buffer_info - - -_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition -_SetConsoleCursorPosition.argtypes = [ - wintypes.HANDLE, - cast(Type[COORD], WindowsCoordinates), -] -_SetConsoleCursorPosition.restype = wintypes.BOOL - - -def SetConsoleCursorPosition( - std_handle: wintypes.HANDLE, coords: WindowsCoordinates -) -> bool: - """Set the position of the cursor in the console screen - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - coords (WindowsCoordinates): The coordinates to move the cursor to. - - Returns: - bool: True if the function succeeds, otherwise False. - """ - return bool(_SetConsoleCursorPosition(std_handle, coords)) - - -_GetConsoleCursorInfo = windll.kernel32.GetConsoleCursorInfo -_GetConsoleCursorInfo.argtypes = [ - wintypes.HANDLE, - ctypes.POINTER(CONSOLE_CURSOR_INFO), -] -_GetConsoleCursorInfo.restype = wintypes.BOOL - - -def GetConsoleCursorInfo( - std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO -) -> bool: - """Get the cursor info - used to get cursor visibility and width - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct that receives information - about the console's cursor. - - Returns: - bool: True if the function succeeds, otherwise False. - """ - return bool(_GetConsoleCursorInfo(std_handle, byref(cursor_info))) - - -_SetConsoleCursorInfo = windll.kernel32.SetConsoleCursorInfo -_SetConsoleCursorInfo.argtypes = [ - wintypes.HANDLE, - ctypes.POINTER(CONSOLE_CURSOR_INFO), -] -_SetConsoleCursorInfo.restype = wintypes.BOOL - - -def SetConsoleCursorInfo( - std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO -) -> bool: - """Set the cursor info - used for adjusting cursor visibility and width - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct containing the new cursor info. - - Returns: - bool: True if the function succeeds, otherwise False. - """ - return bool(_SetConsoleCursorInfo(std_handle, byref(cursor_info))) - - -_SetConsoleTitle = windll.kernel32.SetConsoleTitleW -_SetConsoleTitle.argtypes = [wintypes.LPCWSTR] -_SetConsoleTitle.restype = wintypes.BOOL - - -def SetConsoleTitle(title: str) -> bool: - """Sets the title of the current console window - - Args: - title (str): The new title of the console window. - - Returns: - bool: True if the function succeeds, otherwise False. - """ - return bool(_SetConsoleTitle(title)) - - -class LegacyWindowsTerm: - """This class allows interaction with the legacy Windows Console API. It should only be used in the context - of environments where virtual terminal processing is not available. However, if it is used in a Windows environment, - the entire API should work. - - Args: - file (IO[str]): The file which the Windows Console API HANDLE is retrieved from, defaults to sys.stdout. 
- """ - - BRIGHT_BIT = 8 - - # Indices are ANSI color numbers, values are the corresponding Windows Console API color numbers - ANSI_TO_WINDOWS = [ - 0, # black The Windows colours are defined in wincon.h as follows: - 4, # red define FOREGROUND_BLUE 0x0001 -- 0000 0001 - 2, # green define FOREGROUND_GREEN 0x0002 -- 0000 0010 - 6, # yellow define FOREGROUND_RED 0x0004 -- 0000 0100 - 1, # blue define FOREGROUND_INTENSITY 0x0008 -- 0000 1000 - 5, # magenta define BACKGROUND_BLUE 0x0010 -- 0001 0000 - 3, # cyan define BACKGROUND_GREEN 0x0020 -- 0010 0000 - 7, # white define BACKGROUND_RED 0x0040 -- 0100 0000 - 8, # bright black (grey) define BACKGROUND_INTENSITY 0x0080 -- 1000 0000 - 12, # bright red - 10, # bright green - 14, # bright yellow - 9, # bright blue - 13, # bright magenta - 11, # bright cyan - 15, # bright white - ] - - def __init__(self, file: "IO[str]") -> None: - handle = GetStdHandle(STDOUT) - self._handle = handle - default_text = GetConsoleScreenBufferInfo(handle).wAttributes - self._default_text = default_text - - self._default_fore = default_text & 7 - self._default_back = (default_text >> 4) & 7 - self._default_attrs = self._default_fore | (self._default_back << 4) - - self._file = file - self.write = file.write - self.flush = file.flush - - @property - def cursor_position(self) -> WindowsCoordinates: - """Returns the current position of the cursor (0-based) - - Returns: - WindowsCoordinates: The current cursor position. - """ - coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition - return WindowsCoordinates(row=cast(int, coord.Y), col=cast(int, coord.X)) - - @property - def screen_size(self) -> WindowsCoordinates: - """Returns the current size of the console screen buffer, in character columns and rows - - Returns: - WindowsCoordinates: The width and height of the screen as WindowsCoordinates. - """ - screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize - return WindowsCoordinates( - row=cast(int, screen_size.Y), col=cast(int, screen_size.X) - ) - - def write_text(self, text: str) -> None: - """Write text directly to the terminal without any modification of styles - - Args: - text (str): The text to write to the console - """ - self.write(text) - self.flush() - - def write_styled(self, text: str, style: Style) -> None: - """Write styled text to the terminal. - - Args: - text (str): The text to write - style (Style): The style of the text - """ - color = style.color - bgcolor = style.bgcolor - if style.reverse: - color, bgcolor = bgcolor, color - - if color: - fore = color.downgrade(ColorSystem.WINDOWS).number - fore = fore if fore is not None else 7 # Default to ANSI 7: White - if style.bold: - fore = fore | self.BRIGHT_BIT - if style.dim: - fore = fore & ~self.BRIGHT_BIT - fore = self.ANSI_TO_WINDOWS[fore] - else: - fore = self._default_fore - - if bgcolor: - back = bgcolor.downgrade(ColorSystem.WINDOWS).number - back = back if back is not None else 0 # Default to ANSI 0: Black - back = self.ANSI_TO_WINDOWS[back] - else: - back = self._default_back - - assert fore is not None - assert back is not None - - SetConsoleTextAttribute( - self._handle, attributes=ctypes.c_ushort(fore | (back << 4)) - ) - self.write_text(text) - SetConsoleTextAttribute(self._handle, attributes=self._default_text) - - def move_cursor_to(self, new_position: WindowsCoordinates) -> None: - """Set the position of the cursor - - Args: - new_position (WindowsCoordinates): The WindowsCoordinates representing the new position of the cursor. 
- """ - if new_position.col < 0 or new_position.row < 0: - return - SetConsoleCursorPosition(self._handle, coords=new_position) - - def erase_line(self) -> None: - """Erase all content on the line the cursor is currently located at""" - screen_size = self.screen_size - cursor_position = self.cursor_position - cells_to_erase = screen_size.col - start_coordinates = WindowsCoordinates(row=cursor_position.row, col=0) - FillConsoleOutputCharacter( - self._handle, " ", length=cells_to_erase, start=start_coordinates - ) - FillConsoleOutputAttribute( - self._handle, - self._default_attrs, - length=cells_to_erase, - start=start_coordinates, - ) - - def erase_end_of_line(self) -> None: - """Erase all content from the cursor position to the end of that line""" - cursor_position = self.cursor_position - cells_to_erase = self.screen_size.col - cursor_position.col - FillConsoleOutputCharacter( - self._handle, " ", length=cells_to_erase, start=cursor_position - ) - FillConsoleOutputAttribute( - self._handle, - self._default_attrs, - length=cells_to_erase, - start=cursor_position, - ) - - def erase_start_of_line(self) -> None: - """Erase all content from the cursor position to the start of that line""" - row, col = self.cursor_position - start = WindowsCoordinates(row, 0) - FillConsoleOutputCharacter(self._handle, " ", length=col, start=start) - FillConsoleOutputAttribute( - self._handle, self._default_attrs, length=col, start=start - ) - - def move_cursor_up(self) -> None: - """Move the cursor up a single cell""" - cursor_position = self.cursor_position - SetConsoleCursorPosition( - self._handle, - coords=WindowsCoordinates( - row=cursor_position.row - 1, col=cursor_position.col - ), - ) - - def move_cursor_down(self) -> None: - """Move the cursor down a single cell""" - cursor_position = self.cursor_position - SetConsoleCursorPosition( - self._handle, - coords=WindowsCoordinates( - row=cursor_position.row + 1, - col=cursor_position.col, - ), - ) - - def move_cursor_forward(self) -> None: - """Move the cursor forward a single cell. Wrap to the next line if required.""" - row, col = self.cursor_position - if col == self.screen_size.col - 1: - row += 1 - col = 0 - else: - col += 1 - SetConsoleCursorPosition( - self._handle, coords=WindowsCoordinates(row=row, col=col) - ) - - def move_cursor_to_column(self, column: int) -> None: - """Move cursor to the column specified by the zero-based column index, staying on the same row - - Args: - column (int): The zero-based column index to move the cursor to. - """ - row, _ = self.cursor_position - SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column)) - - def move_cursor_backward(self) -> None: - """Move the cursor backward a single cell. 
Wrap to the previous line if required.""" - row, col = self.cursor_position - if col == 0: - row -= 1 - col = self.screen_size.col - 1 - else: - col -= 1 - SetConsoleCursorPosition( - self._handle, coords=WindowsCoordinates(row=row, col=col) - ) - - def hide_cursor(self) -> None: - """Hide the cursor""" - current_cursor_size = self._get_cursor_size() - invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0) - SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor) - - def show_cursor(self) -> None: - """Show the cursor""" - current_cursor_size = self._get_cursor_size() - visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1) - SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor) - - def set_title(self, title: str) -> None: - """Set the title of the terminal window - - Args: - title (str): The new title of the console window - """ - assert len(title) < 255, "Console title must be less than 255 characters" - SetConsoleTitle(title) - - def _get_cursor_size(self) -> int: - """Get the percentage of the character cell that is filled by the cursor""" - cursor_info = CONSOLE_CURSOR_INFO() - GetConsoleCursorInfo(self._handle, cursor_info=cursor_info) - return int(cursor_info.dwSize) - - -if __name__ == "__main__": - handle = GetStdHandle() - - from pip._vendor.rich.console import Console - - console = Console() - - term = LegacyWindowsTerm(sys.stdout) - term.set_title("Win32 Console Examples") - - style = Style(color="black", bgcolor="red") - - heading = Style.parse("black on green") - - # Check colour output - console.rule("Checking colour output") - console.print("[on red]on red!") - console.print("[blue]blue!") - console.print("[yellow]yellow!") - console.print("[bold yellow]bold yellow!") - console.print("[bright_yellow]bright_yellow!") - console.print("[dim bright_yellow]dim bright_yellow!") - console.print("[italic cyan]italic cyan!") - console.print("[bold white on blue]bold white on blue!") - console.print("[reverse bold white on blue]reverse bold white on blue!") - console.print("[bold black on cyan]bold black on cyan!") - console.print("[black on green]black on green!") - console.print("[blue on green]blue on green!") - console.print("[white on black]white on black!") - console.print("[black on white]black on white!") - console.print("[#1BB152 on #DA812D]#1BB152 on #DA812D!") - - # Check cursor movement - console.rule("Checking cursor movement") - console.print() - term.move_cursor_backward() - term.move_cursor_backward() - term.write_text("went back and wrapped to prev line") - time.sleep(1) - term.move_cursor_up() - term.write_text("we go up") - time.sleep(1) - term.move_cursor_down() - term.write_text("and down") - time.sleep(1) - term.move_cursor_up() - term.move_cursor_backward() - term.move_cursor_backward() - term.write_text("we went up and back 2") - time.sleep(1) - term.move_cursor_down() - term.move_cursor_backward() - term.move_cursor_backward() - term.write_text("we went down and back 2") - time.sleep(1) - - # Check erasing of lines - term.hide_cursor() - console.print() - console.rule("Checking line erasing") - console.print("\n...Deleting to the start of the line...") - term.write_text("The red arrow shows the cursor location, and direction of erase") - time.sleep(1) - term.move_cursor_to_column(16) - term.write_styled("<", Style.parse("black on red")) - term.move_cursor_backward() - time.sleep(1) - term.erase_start_of_line() - time.sleep(1) - - console.print("\n\n...And to the end of the line...") - 
term.write_text("The red arrow shows the cursor location, and direction of erase") - time.sleep(1) - - term.move_cursor_to_column(16) - term.write_styled(">", Style.parse("black on red")) - time.sleep(1) - term.erase_end_of_line() - time.sleep(1) - - console.print("\n\n...Now the whole line will be erased...") - term.write_styled("I'm going to disappear!", style=Style.parse("black on cyan")) - time.sleep(1) - term.erase_line() - - term.show_cursor() - print("\n") diff --git a/spaces/tobiascz/demotime/pytorch_grad_cam/fullgrad_cam.py b/spaces/tobiascz/demotime/pytorch_grad_cam/fullgrad_cam.py deleted file mode 100644 index 1a2685eff60d63ee758e4b11510ad148311160e9..0000000000000000000000000000000000000000 --- a/spaces/tobiascz/demotime/pytorch_grad_cam/fullgrad_cam.py +++ /dev/null @@ -1,95 +0,0 @@ -import numpy as np -import torch -from pytorch_grad_cam.base_cam import BaseCAM -from pytorch_grad_cam.utils.find_layers import find_layer_predicate_recursive -from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection -from pytorch_grad_cam.utils.image import scale_accross_batch_and_channels, scale_cam_image - -# https://arxiv.org/abs/1905.00780 - - -class FullGrad(BaseCAM): - def __init__(self, model, target_layers, use_cuda=False, - reshape_transform=None): - if len(target_layers) > 0: - print( - "Warning: target_layers is ignored in FullGrad. All bias layers will be used instead") - - def layer_with_2D_bias(layer): - bias_target_layers = [torch.nn.Conv2d, torch.nn.BatchNorm2d] - if type(layer) in bias_target_layers and layer.bias is not None: - return True - return False - target_layers = find_layer_predicate_recursive( - model, layer_with_2D_bias) - super( - FullGrad, - self).__init__( - model, - target_layers, - use_cuda, - reshape_transform, - compute_input_gradient=True) - self.bias_data = [self.get_bias_data( - layer).cpu().numpy() for layer in target_layers] - - def get_bias_data(self, layer): - # Borrowed from official paper impl: - # https://github.com/idiap/fullgrad-saliency/blob/master/saliency/tensor_extractor.py#L47 - if isinstance(layer, torch.nn.BatchNorm2d): - bias = - (layer.running_mean * layer.weight - / torch.sqrt(layer.running_var + layer.eps)) + layer.bias - return bias.data - else: - return layer.bias.data - - def compute_cam_per_layer( - self, - input_tensor, - target_category, - eigen_smooth): - input_grad = input_tensor.grad.data.cpu().numpy() - grads_list = [g.cpu().data.numpy() for g in - self.activations_and_grads.gradients] - cam_per_target_layer = [] - target_size = self.get_target_width_height(input_tensor) - - gradient_multiplied_input = input_grad * input_tensor.data.cpu().numpy() - gradient_multiplied_input = np.abs(gradient_multiplied_input) - gradient_multiplied_input = scale_accross_batch_and_channels( - gradient_multiplied_input, - target_size) - cam_per_target_layer.append(gradient_multiplied_input) - - # Loop over the saliency image from every layer - assert(len(self.bias_data) == len(grads_list)) - for bias, grads in zip(self.bias_data, grads_list): - bias = bias[None, :, None, None] - # In the paper they take the absolute value, - # but possibily taking only the positive gradients will work - # better. 
- bias_grad = np.abs(bias * grads) - result = scale_accross_batch_and_channels( - bias_grad, target_size) - result = np.sum(result, axis=1) - cam_per_target_layer.append(result[:, None, :]) - cam_per_target_layer = np.concatenate(cam_per_target_layer, axis=1) - if eigen_smooth: - # Resize to a smaller image, since this method typically has a very large number of channels, - # and then consumes a lot of memory - cam_per_target_layer = scale_accross_batch_and_channels( - cam_per_target_layer, (target_size[0] // 8, target_size[1] // 8)) - cam_per_target_layer = get_2d_projection(cam_per_target_layer) - cam_per_target_layer = cam_per_target_layer[:, None, :, :] - cam_per_target_layer = scale_accross_batch_and_channels( - cam_per_target_layer, - target_size) - else: - cam_per_target_layer = np.sum( - cam_per_target_layer, axis=1)[:, None, :] - - return cam_per_target_layer - - def aggregate_multi_layers(self, cam_per_target_layer): - result = np.sum(cam_per_target_layer, axis=1) - return scale_cam_image(result) diff --git a/spaces/tomofi/MMOCR/docs/en/conf.py b/spaces/tomofi/MMOCR/docs/en/conf.py deleted file mode 100644 index baad575a4a383db7ba33dd4daac68bc93df45345..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/docs/en/conf.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. - -import os -import subprocess -import sys - -import pytorch_sphinx_theme - -sys.path.insert(0, os.path.abspath('../../')) - -# -- Project information ----------------------------------------------------- - -project = 'MMOCR' -copyright = '2020-2030, OpenMMLab' -author = 'OpenMMLab' - -# The full version, including alpha/beta/rc tags -version_file = '../../mmocr/version.py' -with open(version_file, 'r') as f: - exec(compile(f.read(), version_file, 'exec')) -__version__ = locals()['__version__'] -release = __version__ - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', - 'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser' -] - -autodoc_mock_imports = ['mmcv._ext'] - -# Ignore >>> when copying code -copybutton_prompt_text = r'>>> |\.\.\. ' -copybutton_prompt_is_regexp = True - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -source_suffix = { - '.rst': 'restructuredtext', - '.md': 'markdown', -} - -# The master toctree document. -master_doc = 'index' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. 
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
-
-# -- Options for HTML output -------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
-#
-# html_theme = 'sphinx_rtd_theme'
-html_theme = 'pytorch_sphinx_theme'
-html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
-html_theme_options = {
-    'logo_url':
-    'https://mmocr.readthedocs.io/en/latest/',
-    'menu': [
-        {
-            'name':
-            'Tutorial',
-            'url':
-            'https://colab.research.google.com/github/'
-            'open-mmlab/mmocr/blob/main/demo/MMOCR_Tutorial.ipynb'
-        },
-        {
-            'name': 'GitHub',
-            'url': 'https://github.com/open-mmlab/mmocr'
-        },
-        {
-            'name':
-            'Upstream',
-            'children': [
-                {
-                    'name': 'MMCV',
-                    'url': 'https://github.com/open-mmlab/mmcv',
-                    'description': 'Foundational library for computer vision'
-                },
-                {
-                    'name': 'MMDetection',
-                    'url': 'https://github.com/open-mmlab/mmdetection',
-                    'description': 'Object detection toolbox and benchmark'
-                },
-            ]
-        },
-    ],
-    # Specify the language of shared menu
-    'menu_lang':
-    'en'
-}
-
-language = 'en'
-
-master_doc = 'index'
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-html_css_files = ['css/readthedocs.css']
-
-# Enable ::: for myst
-myst_enable_extensions = ['colon_fence']
-
-
-def builder_inited_handler(app):
-    subprocess.run(['./merge_docs.sh'])
-    subprocess.run(['./stats.py'])
-
-
-def setup(app):
-    app.connect('builder-inited', builder_inited_handler)
diff --git a/spaces/tomofi/MMOCR/mmocr/models/ner/losses/masked_focal_loss.py b/spaces/tomofi/MMOCR/mmocr/models/ner/losses/masked_focal_loss.py
deleted file mode 100644
index 065dc781db3d8af4ba9fd78c4cf27cca95f799eb..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/mmocr/models/ner/losses/masked_focal_loss.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from torch import nn
-
-from mmocr.models.builder import LOSSES
-from mmocr.models.common.losses.focal_loss import FocalLoss
-
-
-@LOSSES.register_module()
-class MaskedFocalLoss(nn.Module):
-    """The implementation of masked focal loss.
-
-    The mask has 1 for real tokens and 0 for padding tokens,
-    which keeps only the active parts of the focal loss.
-    Args:
-        num_labels (int): Number of classes in labels.
-        ignore_index (int): Specifies a target value that is ignored
-            and does not contribute to the input gradient.
-    """
-
-    def __init__(self, num_labels=None, ignore_index=0):
-        super().__init__()
-        self.num_labels = num_labels
-        self.criterion = FocalLoss(ignore_index=ignore_index)
-
-    def forward(self, logits, img_metas):
-        '''Loss forward.
-        Args:
-            logits: Model output with shape [N, C].
-            img_metas (dict): A dict containing the following keys:
-                    - img (list): This parameter is reserved.
-                    - labels (list[int]): The labels for each word
-                        of the sequence.
-                    - texts (list): The words of the sequence.
-                    - input_ids (list): The ids for each word of
-                        the sequence.
-                    - attention_mask (list): The mask for each word
-                        of the sequence. The mask has 1 for real tokens
-                        and 0 for padding tokens. Only real tokens are
-                        attended to.
-                    - token_type_ids (list): The tokens for each word
-                        of the sequence.
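-        Returns:
-            dict: ``{'loss_cls': loss}``, where the loss is the focal loss
-                computed over the unmasked (real) tokens only.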
- ''' - - labels = img_metas['labels'] - attention_masks = img_metas['attention_masks'] - - # Only keep active parts of the loss - if attention_masks is not None: - active_loss = attention_masks.view(-1) == 1 - active_logits = logits.view(-1, self.num_labels)[active_loss] - active_labels = labels.view(-1)[active_loss] - loss = self.criterion(active_logits, active_labels) - else: - loss = self.criterion( - logits.view(-1, self.num_labels), labels.view(-1)) - return {'loss_cls': loss} diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/evaluation/rotated_icdar2013/e2e/prepare_results.py b/spaces/tomofi/MaskTextSpotterV3-OCR/evaluation/rotated_icdar2013/e2e/prepare_results.py deleted file mode 100644 index dc0d71dcc642b8ec95f16ce79ce599c122a95836..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/evaluation/rotated_icdar2013/e2e/prepare_results.py +++ /dev/null @@ -1,267 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -import sys -import os -sys.path.append('./') -import shapely -from shapely.geometry import Polygon,MultiPoint -import numpy as np -import editdistance -sys.path.append('../../') -from weighted_editdistance import weighted_edit_distance -from tqdm import tqdm -try: - import pickle -except ImportError: - import cPickle as pickle - -def list_from_str(st): - line = st.split(',') - # box[0:4], polygon[4:12], word, seq_word, detection_score, rec_socre, seq_score, char_score_path - new_line = [float(a) for a in line[4:12]]+[float(line[-4])]+[line[-5]]+[line[-6]]+[float(line[-3])]+[float(line[-2])] + [line[-1]] - return new_line - -def polygon_from_list(line): - """ - Create a shapely polygon object from gt or dt line. - """ - polygon_points = np.array(line).reshape(4, 2) - polygon = Polygon(polygon_points).convex_hull - return polygon - -def polygon_iou(list1, list2): - """ - Intersection over union between two shapely polygons. 
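-    Note: each polygon is reduced to its convex hull, and the union area is
-    approximated by the convex hull of all eight points, so the value may
-    differ from the exact IoU when the quadrilaterals are concave.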
- """ - polygon_points1 = np.array(list1).reshape(4, 2) - poly1 = Polygon(polygon_points1).convex_hull - polygon_points2 = np.array(list2).reshape(4, 2) - poly2 = Polygon(polygon_points2).convex_hull - union_poly = np.concatenate((polygon_points1,polygon_points2)) - if not poly1.intersects(poly2): # this test is fast and can accelerate calculation - iou = 0 - else: - try: - inter_area = poly1.intersection(poly2).area - #union_area = poly1.area + poly2.area - inter_area - union_area = MultiPoint(union_poly).convex_hull.area - iou = float(inter_area) / (union_area+1e-6) - except shapely.geos.TopologicalError: - print('shapely.geos.TopologicalError occured, iou set to 0') - iou = 0 - return iou - -def nms(boxes,overlap): - rec_scores = [b[-2] for b in boxes] - indices = sorted(range(len(rec_scores)), key=lambda k: -rec_scores[k]) - box_num = len(boxes) - nms_flag = [True]*box_num - for i in range(box_num): - ii = indices[i] - if not nms_flag[ii]: - continue - for j in range(box_num): - jj = indices[j] - if j == i: - continue - if not nms_flag[jj]: - continue - box1 = boxes[ii] - box2 = boxes[jj] - box1_score = rec_scores[ii] - box2_score = rec_scores[jj] - str1 = box1[9] - str2 = box2[9] - box_i = [box1[0],box1[1],box1[4],box1[5]] - box_j = [box2[0],box2[1],box2[4],box2[5]] - poly1 = polygon_from_list(box1[0:8]) - poly2 = polygon_from_list(box2[0:8]) - iou = polygon_iou(box1[0:8],box2[0:8]) - thresh = overlap - - if iou > thresh: - if box1_score > box2_score: - nms_flag[jj] = False - if box1_score == box2_score and poly1.area > poly2.area: - nms_flag[jj] = False - if box1_score == box2_score and poly1.area<=poly2.area: - nms_flag[ii] = False - break - - return nms_flag - -def packing(save_dir, cache_dir, pack_name): - files = os.listdir(save_dir) - if not os.path.exists(cache_dir): - os.mkdir(cache_dir) - os.system('zip -r -q -j '+os.path.join(cache_dir, pack_name+'.zip')+' '+save_dir+'/*') - -def test_single(results_dir,lexicon_type=3,cache_dir='./cache_dir',score_det=0.5,score_rec=0.5,score_rec_seq=0.5,overlap=0.2, use_lexicon=True, weighted_ed=True, use_seq=False, use_char=False, mix=False): - ''' - results_dir: result directory - score_det: score of detection bounding box - score_rec: score of the mask recognition branch - socre_rec_seq: score of the sequence recognition branch - overlap: overlap threshold used for nms - lexicon_type: 1 for generic; 2 for weak; 3 for strong - use_seq: use the recognition result of sequence branch - use_mix: use both the recognition result of the mask and sequence branches, selected by score - ''' - print('score_det:', 'score_det:', score_det, 'score_rec:', score_rec, 'score_rec_seq:', score_rec_seq, 'lexicon_type:', lexicon_type, 'weighted_ed:', weighted_ed, 'use_seq:', use_seq, 'use_char:', use_char, 'mix:', mix) - if not os.path.exists(cache_dir): - os.mkdir(cache_dir) - nms_dir = os.path.join(cache_dir,str(score_det)+'_'+str(score_rec)+'_'+str(score_rec_seq)) - if not os.path.exists(nms_dir): - os.mkdir(nms_dir) - if lexicon_type==1: - # generic lexicon - lexicon_path = '../../lexicons/ic13/GenericVocabulary_new.txt' - lexicon_fid=open(lexicon_path, 'r') - pair_list = open('../../lexicons/ic13/GenericVocabulary_pair_list.txt', 'r') - pairs = dict() - for line in pair_list.readlines(): - line=line.strip() - word = line.split(' ')[0].upper() - word_gt = line[len(word)+1:] - pairs[word] = word_gt - lexicon_fid=open(lexicon_path, 'r') - lexicon=[] - for line in lexicon_fid.readlines(): - line=line.strip() - lexicon.append(line) - if lexicon_type==2: - # 
weak lexicon - lexicon_path = '../../lexicons/ic13/ch4_test_vocabulary_new.txt' - lexicon_fid=open(lexicon_path, 'r') - pair_list = open('../../lexicons/ic13/ch4_test_vocabulary_pair_list.txt', 'r') - pairs = dict() - for line in pair_list.readlines(): - line=line.strip() - word = line.split(' ')[0].upper() - word_gt = line[len(word)+1:] - pairs[word] = word_gt - lexicon_fid=open(lexicon_path, 'r') - lexicon=[] - for line in lexicon_fid.readlines(): - line=line.strip() - lexicon.append(line) - - for i in tqdm(range(1,234)): - img = 'img_'+str(i)+'.jpg' - gt_img = 'gt_img_'+str(i)+'.txt' - if lexicon_type==3: - # weak - lexicon_path = '../../lexicons/ic13/new_strong_lexicon/new_voc_img_' + str(i) + '.txt' - lexicon_fid=open(lexicon_path, 'r') - pair_list = open('../../lexicons/ic13/new_strong_lexicon/pair_voc_img_' + str(i) + '.txt', 'r') - pairs = dict() - for line in pair_list.readlines(): - line=line.strip() - word = line.split(' ')[0].upper() - word_gt = line[len(word)+1:] - pairs[word] = word_gt - lexicon_fid=open(lexicon_path, 'r') - lexicon=[] - for line in lexicon_fid.readlines(): - line=line.strip() - lexicon.append(line) - result_path = os.path.join(results_dir,'res_img_'+str(i)+'.txt') - if os.path.isfile(result_path): - with open(result_path,'r') as f: - dt_lines = [a.strip() for a in f.readlines()] - dt_lines = [list_from_str(dt) for dt in dt_lines] - else: - dt_lines = [] - dt_lines = [dt for dt in dt_lines if dt[-2]>score_rec_seq and dt[-3]>score_rec and dt[-6]>score_det] - nms_flag = nms(dt_lines,overlap) - boxes = [] - for k in range(len(dt_lines)): - dt = dt_lines[k] - if nms_flag[k]: - if dt not in boxes: - boxes.append(dt) - - with open(os.path.join(nms_dir,'res_img_'+str(i)+'.txt'),'w') as f: - for g in boxes: - gt_coors = [int(b) for b in g[0:8]] - with open('../../../' + g[-1], "rb") as input_file: - # with open(g[-1], "rb") as input_file: - dict_scores = pickle.load(input_file) - if use_char and use_seq: - if g[-2]>g[-3]: - word = g[-5] - scores = dict_scores['seq_char_scores'][:,1:-1].swapaxes(0,1) - else: - word = g[-4] - scores = dict_scores['seg_char_scores'] - elif use_seq: - word = g[-5] - scores = dict_scores['seq_char_scores'][:,1:-1].swapaxes(0,1) - else: - word = g[-4] - scores = dict_scores['seg_char_scores'] - if not use_lexicon: - match_word = word - match_dist = 0. - else: - match_word, match_dist = find_match_word(word, lexicon, pairs, scores, use_lexicon, weighted_ed) - if match_dist<1.5 or lexicon_type==1: - gt_coor_strs = [str(a) for a in gt_coors]+ [match_word] - f.write(','.join(gt_coor_strs)+'\r\n') - - pack_name = str(score_det)+'_'+str(score_rec)+'_over'+str(overlap) - - packing(nms_dir,cache_dir,pack_name) - submit_file_path = os.path.join(cache_dir, pack_name+'.zip') - return submit_file_path - -def find_match_word(rec_str, lexicon, pairs, scores_numpy, use_ed = True, weighted_ed = False): - if not use_ed: - return rec_str - rec_str = rec_str.upper() - dist_min = 100 - dist_min_pre = 100 - match_word = '' - match_dist = 100 - if not weighted_ed: - for word in lexicon: - word = word.upper() - ed = editdistance.eval(rec_str, word) - length_dist = abs(len(word) - len(rec_str)) - # dist = ed + length_dist - dist = ed - if dist EasyDict() - self._shared_optimizers = OrderedDict() # device_name => optimizer_class - self._gradient_shapes = None # [shape, ...] - self._report_mem_usage = report_mem_usage - - # Validate arguments. - assert callable(self.optimizer_class) - - # Share internal state if requested. 
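-        # (Sharing means this instance reuses the other instance's per-device
-        # tf.train.Optimizer objects, so optimizer-internal state such as Adam
-        # moment estimates is common to both.)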
- if share is not None: - assert isinstance(share, Optimizer) - assert self.optimizer_class is share.optimizer_class - assert self.learning_rate is share.learning_rate - assert self.optimizer_kwargs == share.optimizer_kwargs - self._shared_optimizers = share._shared_optimizers # pylint: disable=protected-access - - def _get_device(self, device_name: str): - """Get internal state for the given TensorFlow device.""" - tfutil.assert_tf_initialized() - if device_name in self._devices: - return self._devices[device_name] - - # Initialize fields. - device = util.EasyDict() - device.name = device_name - device.optimizer = None # Underlying optimizer: optimizer_class - device.loss_scaling_var = None # Log2 of loss scaling: tf.Variable - device.grad_raw = OrderedDict() # Raw gradients: var => [grad, ...] - device.grad_clean = OrderedDict() # Clean gradients: var => grad - device.grad_acc_vars = OrderedDict() # Accumulation sums: var => tf.Variable - device.grad_acc_count = None # Accumulation counter: tf.Variable - device.grad_acc = OrderedDict() # Accumulated gradients: var => grad - - # Setup TensorFlow objects. - with tfutil.absolute_name_scope(self.scope + "/Devices"), tf.device(device_name), tf.control_dependencies(None): - if device_name not in self._shared_optimizers: - optimizer_name = self.scope.replace("/", "_") + "_opt%d" % len(self._shared_optimizers) - self._shared_optimizers[device_name] = self.optimizer_class(name=optimizer_name, learning_rate=self.learning_rate, **self.optimizer_kwargs) - device.optimizer = self._shared_optimizers[device_name] - if self.use_loss_scaling: - device.loss_scaling_var = tf.Variable(np.float32(self.loss_scaling_init), trainable=False, name="loss_scaling_var") - - # Register device. - self._devices[device_name] = device - return device - - def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None: - """Register the gradients of the given loss function with respect to the given variables. - Intended to be called once per GPU.""" - tfutil.assert_tf_initialized() - assert not self._updates_applied - device = self._get_device(loss.device) - - # Validate trainables. - if isinstance(trainable_vars, dict): - trainable_vars = list(trainable_vars.values()) # allow passing in Network.trainables as vars - assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1 - assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss]) - assert all(var.device == device.name for var in trainable_vars) - - # Validate shapes. - if self._gradient_shapes is None: - self._gradient_shapes = [var.shape.as_list() for var in trainable_vars] - assert len(trainable_vars) == len(self._gradient_shapes) - assert all(var.shape.as_list() == var_shape for var, var_shape in zip(trainable_vars, self._gradient_shapes)) - - # Report memory usage if requested. - deps = [loss] - if self._report_mem_usage: - self._report_mem_usage = False - try: - with tf.name_scope(self.id + '_mem'), tf.device(device.name), tf.control_dependencies([loss]): - deps.append(autosummary.autosummary(self.id + "/mem_usage_gb", tf.contrib.memory_stats.BytesInUse() / 2**30)) - except tf.errors.NotFoundError: - pass - - # Compute gradients. 
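-        # (With use_loss_scaling enabled, the loss is multiplied by
-        # 2**loss_scaling_var before differentiation so that small float16
-        # gradients remain representable; the scale is undone per variable in
-        # apply_updates(). GATE_NONE lets TensorFlow schedule gradient ops
-        # freely, trading graph-level determinism for lower memory use.)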
- with tf.name_scope(self.id + "_grad"), tf.device(device.name), tf.control_dependencies(deps): - loss = self.apply_loss_scaling(tf.cast(loss, tf.float32)) - gate = tf.train.Optimizer.GATE_NONE # disable gating to reduce memory usage - grad_list = device.optimizer.compute_gradients(loss=loss, var_list=trainable_vars, gate_gradients=gate) - - # Register gradients. - for grad, var in grad_list: - if var not in device.grad_raw: - device.grad_raw[var] = [] - device.grad_raw[var].append(grad) - - def apply_updates(self, allow_no_op: bool = False) -> tf.Operation: - """Construct training op to update the registered variables based on their gradients.""" - tfutil.assert_tf_initialized() - assert not self._updates_applied - self._updates_applied = True - all_ops = [] - - # Check for no-op. - if allow_no_op and len(self._devices) == 0: - with tfutil.absolute_name_scope(self.scope): - return tf.no_op(name='TrainingOp') - - # Clean up gradients. - for device_idx, device in enumerate(self._devices.values()): - with tfutil.absolute_name_scope(self.scope + "/Clean%d" % device_idx), tf.device(device.name): - for var, grad in device.grad_raw.items(): - - # Filter out disconnected gradients and convert to float32. - grad = [g for g in grad if g is not None] - grad = [tf.cast(g, tf.float32) for g in grad] - - # Sum within the device. - if len(grad) == 0: - grad = tf.zeros(var.shape) # No gradients => zero. - elif len(grad) == 1: - grad = grad[0] # Single gradient => use as is. - else: - grad = tf.add_n(grad) # Multiple gradients => sum. - - # Scale as needed. - scale = 1.0 / len(device.grad_raw[var]) / len(self._devices) - scale = tf.constant(scale, dtype=tf.float32, name="scale") - if self.minibatch_multiplier is not None: - scale /= tf.cast(self.minibatch_multiplier, tf.float32) - scale = self.undo_loss_scaling(scale) - device.grad_clean[var] = grad * scale - - # Sum gradients across devices. - if len(self._devices) > 1: - with tfutil.absolute_name_scope(self.scope + "/Broadcast"), tf.device(None): - if platform.system() == "Windows": # Windows => NCCL ops are not available. - self._broadcast_fallback() - elif tf.VERSION.startswith("1.15."): # TF 1.15 => NCCL ops are broken: https://github.com/tensorflow/tensorflow/issues/41539 - self._broadcast_fallback() - else: # Otherwise => NCCL ops are safe to use. - self._broadcast_nccl() - - # Apply updates separately on each device. - for device_idx, device in enumerate(self._devices.values()): - with tfutil.absolute_name_scope(self.scope + "/Apply%d" % device_idx), tf.device(device.name): - # pylint: disable=cell-var-from-loop - - # Accumulate gradients over time. - if self.minibatch_multiplier is None: - acc_ok = tf.constant(True, name='acc_ok') - device.grad_acc = OrderedDict(device.grad_clean) - else: - # Create variables. - with tf.control_dependencies(None): - for var in device.grad_clean.keys(): - device.grad_acc_vars[var] = tf.Variable(tf.zeros(var.shape), trainable=False, name="grad_acc_var") - device.grad_acc_count = tf.Variable(tf.zeros([]), trainable=False, name="grad_acc_count") - - # Track counter. - count_cur = device.grad_acc_count + 1.0 - count_inc_op = lambda: tf.assign(device.grad_acc_count, count_cur) - count_reset_op = lambda: tf.assign(device.grad_acc_count, tf.zeros([])) - acc_ok = (count_cur >= tf.cast(self.minibatch_multiplier, tf.float32)) - all_ops.append(tf.cond(acc_ok, count_reset_op, count_inc_op)) - - # Track gradients. 
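-                    # (Each variable keeps a running sum in grad_acc_vars; once
-                    # minibatch_multiplier micro-batches have been summed,
-                    # acc_ok becomes True, the accumulated gradient is applied
-                    # and the accumulators are reset via the tf.cond calls below.)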
- for var, grad in device.grad_clean.items(): - acc_var = device.grad_acc_vars[var] - acc_cur = acc_var + grad - device.grad_acc[var] = acc_cur - with tf.control_dependencies([acc_cur]): - acc_inc_op = lambda: tf.assign(acc_var, acc_cur) - acc_reset_op = lambda: tf.assign(acc_var, tf.zeros(var.shape)) - all_ops.append(tf.cond(acc_ok, acc_reset_op, acc_inc_op)) - - # No overflow => apply gradients. - all_ok = tf.reduce_all(tf.stack([acc_ok] + [tf.reduce_all(tf.is_finite(g)) for g in device.grad_acc.values()])) - apply_op = lambda: device.optimizer.apply_gradients([(tf.cast(grad, var.dtype), var) for var, grad in device.grad_acc.items()]) - all_ops.append(tf.cond(all_ok, apply_op, tf.no_op)) - - # Adjust loss scaling. - if self.use_loss_scaling: - ls_inc_op = lambda: tf.assign_add(device.loss_scaling_var, self.loss_scaling_inc) - ls_dec_op = lambda: tf.assign_sub(device.loss_scaling_var, self.loss_scaling_dec) - ls_update_op = lambda: tf.group(tf.cond(all_ok, ls_inc_op, ls_dec_op)) - all_ops.append(tf.cond(acc_ok, ls_update_op, tf.no_op)) - - # Last device => report statistics. - if device_idx == len(self._devices) - 1: - all_ops.append(autosummary.autosummary(self.id + "/learning_rate", tf.convert_to_tensor(self.learning_rate))) - all_ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(all_ok, 0, 1), condition=acc_ok)) - if self.use_loss_scaling: - all_ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", device.loss_scaling_var)) - - # Initialize variables. - self.reset_optimizer_state() - if self.use_loss_scaling: - tfutil.init_uninitialized_vars([device.loss_scaling_var for device in self._devices.values()]) - if self.minibatch_multiplier is not None: - tfutil.run([var.initializer for device in self._devices.values() for var in list(device.grad_acc_vars.values()) + [device.grad_acc_count]]) - - # Group everything into a single op. 
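-        # (Callers run the returned op once per training step; tf.group simply
-        # ties together the per-device accumulate/apply/loss-scaling conds
-        # constructed above.)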
- with tfutil.absolute_name_scope(self.scope): - return tf.group(*all_ops, name="TrainingOp") - - def reset_optimizer_state(self) -> None: - """Reset internal state of the underlying optimizer.""" - tfutil.assert_tf_initialized() - tfutil.run([var.initializer for device in self._devices.values() for var in device.optimizer.variables()]) - - def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]: - """Get or create variable representing log2 of the current dynamic loss scaling factor.""" - return self._get_device(device).loss_scaling_var - - def apply_loss_scaling(self, value: TfExpression) -> TfExpression: - """Apply dynamic loss scaling for the given expression.""" - assert tfutil.is_tf_expression(value) - if not self.use_loss_scaling: - return value - return value * tfutil.exp2(self.get_loss_scaling_var(value.device)) - - def undo_loss_scaling(self, value: TfExpression) -> TfExpression: - """Undo the effect of dynamic loss scaling for the given expression.""" - assert tfutil.is_tf_expression(value) - if not self.use_loss_scaling: - return value - return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type - - def _broadcast_nccl(self): - """Sum gradients across devices using NCCL ops (fast path).""" - from tensorflow.python.ops import nccl_ops # pylint: disable=no-name-in-module - for all_vars in zip(*[device.grad_clean.keys() for device in self._devices.values()]): - if any(x.shape.num_elements() > 0 for x in all_vars): - all_grads = [device.grad_clean[var] for device, var in zip(self._devices.values(), all_vars)] - all_grads = nccl_ops.all_sum(all_grads) - for device, var, grad in zip(self._devices.values(), all_vars, all_grads): - device.grad_clean[var] = grad - - def _broadcast_fallback(self): - """Sum gradients across devices using TensorFlow collective ops (slow fallback path).""" - from tensorflow.python.ops import collective_ops # pylint: disable=no-name-in-module - global _collective_ops_warning_printed, _collective_ops_group_key, _collective_ops_instance_key - if all(x.shape.num_elements() == 0 for device in self._devices.values() for x in device.grad_clean.values()): - return - if not _collective_ops_warning_printed: - print("------------------------------------------------------------------------") - print("WARNING: Using slow fallback implementation for inter-GPU communication.") - print("Please use TensorFlow 1.14 on Linux for optimal training performance.") - print("------------------------------------------------------------------------") - _collective_ops_warning_printed = True - for device in self._devices.values(): - with tf.device(device.name): - combo = [tf.reshape(x, [x.shape.num_elements()]) for x in device.grad_clean.values()] - combo = tf.concat(combo, axis=0) - combo = collective_ops.all_reduce(combo, merge_op='Add', final_op='Id', - group_size=len(self._devices), group_key=_collective_ops_group_key, - instance_key=_collective_ops_instance_key) - cur_ofs = 0 - for var, grad_old in device.grad_clean.items(): - grad_new = tf.reshape(combo[cur_ofs : cur_ofs + grad_old.shape.num_elements()], grad_old.shape) - cur_ofs += grad_old.shape.num_elements() - device.grad_clean[var] = grad_new - _collective_ops_instance_key += 1 - - -class SimpleAdam: - """Simplified version of tf.train.AdamOptimizer that behaves identically when used with dnnlib.tflib.Optimizer.""" - - def __init__(self, name="Adam", learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): - self.name = name - self.learning_rate = 
learning_rate - self.beta1 = beta1 - self.beta2 = beta2 - self.epsilon = epsilon - self.all_state_vars = [] - - def variables(self): - return self.all_state_vars - - def compute_gradients(self, loss, var_list, gate_gradients=tf.train.Optimizer.GATE_NONE): - assert gate_gradients == tf.train.Optimizer.GATE_NONE - return list(zip(tf.gradients(loss, var_list), var_list)) - - def apply_gradients(self, grads_and_vars): - with tf.name_scope(self.name): - state_vars = [] - update_ops = [] - - # Adjust learning rate to deal with startup bias. - with tf.control_dependencies(None): - b1pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False) - b2pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False) - state_vars += [b1pow_var, b2pow_var] - b1pow_new = b1pow_var * self.beta1 - b2pow_new = b2pow_var * self.beta2 - update_ops += [tf.assign(b1pow_var, b1pow_new), tf.assign(b2pow_var, b2pow_new)] - lr_new = self.learning_rate * tf.sqrt(1 - b2pow_new) / (1 - b1pow_new) - - # Construct ops to update each variable. - for grad, var in grads_and_vars: - with tf.control_dependencies(None): - m_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False) - v_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False) - state_vars += [m_var, v_var] - m_new = self.beta1 * m_var + (1 - self.beta1) * grad - v_new = self.beta2 * v_var + (1 - self.beta2) * tf.square(grad) - var_delta = lr_new * m_new / (tf.sqrt(v_new) + self.epsilon) - update_ops += [tf.assign(m_var, m_new), tf.assign(v_var, v_new), tf.assign_sub(var, var_delta)] - - # Group everything together. - self.all_state_vars += state_vars - return tf.group(*update_ops) diff --git a/spaces/uparasha/ASRtoTexttoStorytoImagestoVideo/app.py b/spaces/uparasha/ASRtoTexttoStorytoImagestoVideo/app.py deleted file mode 100644 index 802d78aff8e7fa6fc5ed4494c961c6cf4b75cebb..0000000000000000000000000000000000000000 --- a/spaces/uparasha/ASRtoTexttoStorytoImagestoVideo/app.py +++ /dev/null @@ -1,174 +0,0 @@ -import gradio as gr -from transformers import pipeline -import io, base64 -from PIL import Image -import numpy as np -import tensorflow as tf -import mediapy -import os -import sys -from huggingface_hub import snapshot_download - -import streamlit as st -import firebase_admin -from firebase_admin import credentials -from firebase_admin import firestore -import datetime -import tempfile -from typing import Optional -import numpy as np -from TTS.utils.manage import ModelManager -from TTS.utils.synthesizer import Synthesizer - - -# firestore singleton is a cached multiuser instance to persist shared crowdsource memory -@st.experimental_singleton -def get_db_firestore(): - cred = credentials.Certificate('test.json') - firebase_admin.initialize_app(cred, {'projectId': u'clinical-nlp-b9117',}) - db = firestore.client() - return db - -#start firestore singleton -db = get_db_firestore() - -# create ASR ML pipeline -asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h") - -# create Text Classification pipeline -classifier = pipeline("text-classification") - -# create text generator pipeline -story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator") - -# transcribe function -def transcribe(audio): - text = asr(audio)["text"] - return text - -def speech_to_text(speech): - text = asr(speech)["text"] - return text - -def text_to_sentiment(text): - sentiment = classifier(text)[0]["label"] - return sentiment - -def upsert(text): - date_time 
=str(datetime.datetime.today()) - doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time) - doc_ref.set({u'firefield': 'Recognize Speech', u'first': 'https://huggingface.co/spaces/awacke1/Text2SpeechSentimentSave', u'last': text, u'born': date_time,}) - saved = select('Text2SpeechSentimentSave', date_time) - # check it here: https://console.firebase.google.com/u/0/project/clinical-nlp-b9117/firestore/data/~2FStreamlitSpaces - return saved - -def select(collection, document): - doc_ref = db.collection(collection).document(document) - doc = doc_ref.get() - docid = ("The id is: ", doc.id) - contents = ("The contents are: ", doc.to_dict()) - return contents - -def selectall(text): - docs = db.collection('Text2SpeechSentimentSave').stream() - doclist='' - for doc in docs: - r=(f'{doc.id} => {doc.to_dict()}') - doclist += r - return doclist - -# story gen -def generate_story(choice, input_text): - query = " <{0}> {1}".format(choice, input_text) - generated_text = story_gen(query) - generated_text = generated_text[0]['generated_text'] - generated_text = generated_text.split('> ')[2] - return generated_text - -# images gen -def generate_images(text): - steps=50 - width=256 - height=256 - num_images=4 - diversity=6 - image_bytes = image_gen(text, steps, width, height, num_images, diversity) - generated_images = [] - for image in image_bytes[1]: - image_str = image[0] - image_str = image_str.replace("data:image/png;base64,","") - decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8")) - img = Image.open(io.BytesIO(decoded_bytes)) - generated_images.append(img) - return generated_images - -# reductionism - interpolate 4 images - todo - unhardcode the pattern -def generate_interpolation(gallery): - times_to_interpolate = 4 - generated_images = [] - for image_str in gallery: - image_str = image_str.replace("data:image/png;base64,","") - decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8")) - img = Image.open(io.BytesIO(decoded_bytes)) - generated_images.append(img) - generated_images[0].save('frame_0.png') - generated_images[1].save('frame_1.png') - generated_images[2].save('frame_2.png') - generated_images[3].save('frame_3.png') - input_frames = ["frame_0.png", "frame_1.png", "frame_2.png", "frame_3.png"] - frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator)) - mediapy.write_video("out.mp4", frames, fps=15) - return "out.mp4" - -# image generator -image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion") - -# video generator -os.system("git clone https://github.com/google-research/frame-interpolation") -sys.path.append("frame-interpolation") -from eval import interpolator, util - -ffmpeg_path = util.get_ffmpeg_path() -mediapy.set_ffmpeg(ffmpeg_path) -model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style") -interpolator = interpolator.Interpolator(model, None) - -demo = gr.Blocks() -with demo: - - audio_file = gr.inputs.Audio(source="microphone", type="filepath") - text = gr.Textbox() - label = gr.Label() - saved = gr.Textbox() - savedAll = gr.Textbox() - audio = gr.Audio(label="Output", interactive=False) - - b1 = gr.Button("Recognize Speech") - b2 = gr.Button("Classify Sentiment") - b3 = gr.Button("Save Speech to Text") - b4 = gr.Button("Retrieve All") - - input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre") - input_start_text = gr.Textbox(placeholder='A teddy bear outer space', label="Starting Text") - - 
gr.Markdown("1. Select a type of story, then write some starting text! Then hit the 'Generate Story' button to generate a story! Feel free to edit the generated story afterwards!") - button_gen_story = gr.Button("Generate Story") - gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)") - button_gen_images = gr.Button("Generate Images") - gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!") - button_gen_video = gr.Button("Generate Video") - output_generated_story = gr.Textbox(label="Generated Story") - output_gallery = gr.Gallery(label="Generated Story Images") - output_interpolation = gr.Video(label="Generated Video") - - # Bind functions to buttons - button_gen_story.click(fn=generate_story, inputs=[input_story_type , input_start_text], outputs=output_generated_story) - button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery) - button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation) - - b1.click(speech_to_text, inputs=audio_file, outputs=input_start_text ) - b2.click(text_to_sentiment, inputs=text, outputs=label) - b3.click(upsert, inputs=text, outputs=saved) - b4.click(selectall, inputs=text, outputs=savedAll) - -demo.launch(debug=True, enable_queue=True) \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Cs6 Design And Web Premium Serial Number Crack !!LINK!!.md b/spaces/usbethFlerru/sovits-modelsV2/example/Cs6 Design And Web Premium Serial Number Crack !!LINK!!.md deleted file mode 100644 index efb2dea442ead4d03e670ba6fa0638636bfa3ce7..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Cs6 Design And Web Premium Serial Number Crack !!LINK!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

                cs6 design and web premium serial number crack


                Download File ✓✓✓ https://urlcod.com/2uyXr5



                -
                - d5da3c52bf
                -
                -
                -

diff --git a/spaces/user238921933/stable-diffusion-webui/modules/timer.py b/spaces/user238921933/stable-diffusion-webui/modules/timer.py
deleted file mode 100644
index 8187c28edea3d7ce30d1d8c086a6191eb49d960c..0000000000000000000000000000000000000000
--- a/spaces/user238921933/stable-diffusion-webui/modules/timer.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import time
-
-
-class Timer:
-    def __init__(self):
-        self.start = time.time()
-        self.records = {}
-        self.total = 0
-
-    def elapsed(self):
-        end = time.time()
-        res = end - self.start
-        self.start = end
-        return res
-
-    def record(self, category, extra_time=0):
-        e = self.elapsed()
-        if category not in self.records:
-            self.records[category] = 0
-
-        self.records[category] += e + extra_time
-        self.total += e + extra_time
-
-    def summary(self):
-        res = f"{self.total:.1f}s"
-
-        additions = [x for x in self.records.items() if x[1] >= 0.1]
-        if not additions:
-            return res
-
-        res += " ("
-        res += ", ".join([f"{category}: {time_taken:.1f}s" for category, time_taken in additions])
-        res += ")"
-
-        return res
diff --git a/spaces/vestacasino/README/README.md b/spaces/vestacasino/README/README.md
deleted file mode 100644
index f83982eb986283d5fae73ad328e8b53c9ebafc91..0000000000000000000000000000000000000000
--- a/spaces/vestacasino/README/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Casino Vesta
-emoji: 🎰
-colorFrom: red
-colorTo: pink
-sdk: static
-pinned: true
----
-
-The best online casino reviews of 2022. Read our expert reviews of the most trusted online casino sites. Discover the best online casinos of 2022. Find the best online casinos for players in Latin America and Spain. Browse the latest casino reviews and discover bonuses and promotions and slot game offers, or read about other players' experiences.
diff --git a/spaces/videfikri/aicover/uvr5_pack/lib_v5/dataset.py b/spaces/videfikri/aicover/uvr5_pack/lib_v5/dataset.py deleted file mode 100644 index ba0e45be1e8878da0b07eb2128e218bbd7de82ef..0000000000000000000000000000000000000000 --- a/spaces/videfikri/aicover/uvr5_pack/lib_v5/dataset.py +++ /dev/null @@ -1,183 +0,0 @@ -import os -import random - -import numpy as np -import torch -import torch.utils.data -from tqdm import tqdm - -from uvr5_pack.lib_v5 import spec_utils - - -class VocalRemoverValidationSet(torch.utils.data.Dataset): - def __init__(self, patch_list): - self.patch_list = patch_list - - def __len__(self): - return len(self.patch_list) - - def __getitem__(self, idx): - path = self.patch_list[idx] - data = np.load(path) - - X, y = data["X"], data["y"] - - X_mag = np.abs(X) - y_mag = np.abs(y) - - return X_mag, y_mag - - -def make_pair(mix_dir, inst_dir): - input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"] - - X_list = sorted( - [ - os.path.join(mix_dir, fname) - for fname in os.listdir(mix_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - y_list = sorted( - [ - os.path.join(inst_dir, fname) - for fname in os.listdir(inst_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - - filelist = list(zip(X_list, y_list)) - - return filelist - - -def train_val_split(dataset_dir, split_mode, val_rate, val_filelist): - if split_mode == "random": - filelist = make_pair( - os.path.join(dataset_dir, "mixtures"), - os.path.join(dataset_dir, "instruments"), - ) - - random.shuffle(filelist) - - if len(val_filelist) == 0: - val_size = int(len(filelist) * val_rate) - train_filelist = filelist[:-val_size] - val_filelist = filelist[-val_size:] - else: - train_filelist = [ - pair for pair in filelist if list(pair) not in val_filelist - ] - elif split_mode == "subdirs": - if len(val_filelist) != 0: - raise ValueError( - "The `val_filelist` option is not available in `subdirs` mode" - ) - - train_filelist = make_pair( - os.path.join(dataset_dir, "training/mixtures"), - os.path.join(dataset_dir, "training/instruments"), - ) - - val_filelist = make_pair( - os.path.join(dataset_dir, "validation/mixtures"), - os.path.join(dataset_dir, "validation/instruments"), - ) - - return train_filelist, val_filelist - - -def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha): - perm = np.random.permutation(len(X)) - for i, idx in enumerate(tqdm(perm)): - if np.random.uniform() < reduction_rate: - y[idx] = spec_utils.reduce_vocal_aggressively( - X[idx], y[idx], reduction_mask - ) - - if np.random.uniform() < 0.5: - # swap channel - X[idx] = X[idx, ::-1] - y[idx] = y[idx, ::-1] - if np.random.uniform() < 0.02: - # mono - X[idx] = X[idx].mean(axis=0, keepdims=True) - y[idx] = y[idx].mean(axis=0, keepdims=True) - if np.random.uniform() < 0.02: - # inst - X[idx] = y[idx] - - if np.random.uniform() < mixup_rate and i < len(perm) - 1: - lam = np.random.beta(mixup_alpha, mixup_alpha) - X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]] - y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]] - - return X, y - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset): - len_dataset = patches * len(filelist) - - X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - y_dataset = np.zeros((len_dataset, 2, n_fft // 
2 + 1, cropsize), dtype=np.complex64) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches) - ends = starts + cropsize - for j in range(patches): - idx = i * patches + j - X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]] - y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]] - - return X_dataset, y_dataset - - -def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset): - patch_list = [] - patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format( - cropsize, sr, hop_length, n_fft, offset - ) - os.makedirs(patch_dir, exist_ok=True) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - basename = os.path.splitext(os.path.basename(X_path))[0] - - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - len_dataset = int(np.ceil(X.shape[2] / roi_size)) - for j in range(len_dataset): - outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j)) - start = j * roi_size - if not os.path.exists(outpath): - np.savez( - outpath, - X=X_pad[:, :, start : start + cropsize], - y=y_pad[:, :, start : start + cropsize], - ) - patch_list.append(outpath) - - return VocalRemoverValidationSet(patch_list) diff --git a/spaces/vishnu0001/text2mesh/shap_e/rendering/blender/constants.py b/spaces/vishnu0001/text2mesh/shap_e/rendering/blender/constants.py deleted file mode 100644 index 14aaf07a562873ab24fbb7b89cf600f176fb5c4d..0000000000000000000000000000000000000000 --- a/spaces/vishnu0001/text2mesh/shap_e/rendering/blender/constants.py +++ /dev/null @@ -1,3 +0,0 @@ -UNIFORM_LIGHT_DIRECTION = [0.09387503, -0.63953443, -0.7630093] -BASIC_AMBIENT_COLOR = 0.3 -BASIC_DIFFUSE_COLOR = 0.7 diff --git a/spaces/vlikhitharaj/mygenAIchatbot/README.md b/spaces/vlikhitharaj/mygenAIchatbot/README.md deleted file mode 100644 index 225a6c05cb9bc174f7c5d902547018ae90593f0d..0000000000000000000000000000000000000000 --- a/spaces/vlikhitharaj/mygenAIchatbot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MygenAIchatbot -emoji: 🐠 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/vslasor/VLS10-VideoAudioSummarizer-GR/summarize.py b/spaces/vslasor/VLS10-VideoAudioSummarizer-GR/summarize.py deleted file mode 100644 index 0053dde4348f24cc152a60c4d20f201e3b1f5482..0000000000000000000000000000000000000000 --- a/spaces/vslasor/VLS10-VideoAudioSummarizer-GR/summarize.py +++ /dev/null @@ -1,43 +0,0 @@ -import traceback -import sys - -from youtube_transcript_api import YouTubeTranscriptApi -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM - -def Summarizer(link, model): - - video_id = link.split("=")[1] - - try: - transcript = YouTubeTranscriptApi.get_transcript(video_id) - FinalTranscript = ' '.join([i['text'] for i in transcript]) - - if model == 
"Pegasus": - checkpoint = "google/pegasus-large" - elif model == "mT5": - checkpoint = "csebuetnlp/mT5_multilingual_XLSum" - elif model == "BART": - checkpoint = "sshleifer/distilbart-cnn-12-6" - - tokenizer = AutoTokenizer.from_pretrained(checkpoint) - model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint) - - - inputs = tokenizer(FinalTranscript, - max_length=1024, - truncation=True, - return_tensors="pt") - - summary_ids = model.generate(inputs["input_ids"]) - summary = tokenizer.batch_decode(summary_ids, - skip_special_tokens=True, - clean_up_tokenization_spaces=False) - - - return summary[0] - - - except Exception: - print(traceback.format_exc()) - # or - print(sys.exc_info()[2]) \ No newline at end of file diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/deform_roi_pool.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/deform_roi_pool.py deleted file mode 100644 index cc245ba91fee252226ba22e76bb94a35db9a629b..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/deform_roi_pool.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from torch import nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['deform_roi_pool_forward', 'deform_roi_pool_backward']) - - -class DeformRoIPoolFunction(Function): - - @staticmethod - def symbolic(g, input, rois, offset, output_size, spatial_scale, - sampling_ratio, gamma): - return g.op( - 'mmcv::MMCVDeformRoIPool', - input, - rois, - offset, - pooled_height_i=output_size[0], - pooled_width_i=output_size[1], - spatial_scale_f=spatial_scale, - sampling_ratio_f=sampling_ratio, - gamma_f=gamma) - - @staticmethod - def forward(ctx, - input, - rois, - offset, - output_size, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - if offset is None: - offset = input.new_zeros(0) - ctx.output_size = _pair(output_size) - ctx.spatial_scale = float(spatial_scale) - ctx.sampling_ratio = int(sampling_ratio) - ctx.gamma = float(gamma) - - assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!' 
- - output_shape = (rois.size(0), input.size(1), ctx.output_size[0], - ctx.output_size[1]) - output = input.new_zeros(output_shape) - - ext_module.deform_roi_pool_forward( - input, - rois, - offset, - output, - pooled_height=ctx.output_size[0], - pooled_width=ctx.output_size[1], - spatial_scale=ctx.spatial_scale, - sampling_ratio=ctx.sampling_ratio, - gamma=ctx.gamma) - - ctx.save_for_backward(input, rois, offset) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, rois, offset = ctx.saved_tensors - grad_input = grad_output.new_zeros(input.shape) - grad_offset = grad_output.new_zeros(offset.shape) - - ext_module.deform_roi_pool_backward( - grad_output, - input, - rois, - offset, - grad_input, - grad_offset, - pooled_height=ctx.output_size[0], - pooled_width=ctx.output_size[1], - spatial_scale=ctx.spatial_scale, - sampling_ratio=ctx.sampling_ratio, - gamma=ctx.gamma) - if grad_offset.numel() == 0: - grad_offset = None - return grad_input, None, grad_offset, None, None, None, None - - -deform_roi_pool = DeformRoIPoolFunction.apply - - -class DeformRoIPool(nn.Module): - - def __init__(self, - output_size, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - super(DeformRoIPool, self).__init__() - self.output_size = _pair(output_size) - self.spatial_scale = float(spatial_scale) - self.sampling_ratio = int(sampling_ratio) - self.gamma = float(gamma) - - def forward(self, input, rois, offset=None): - return deform_roi_pool(input, rois, offset, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - - -class DeformRoIPoolPack(DeformRoIPool): - - def __init__(self, - output_size, - output_channels, - deform_fc_channels=1024, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale, - sampling_ratio, gamma) - - self.output_channels = output_channels - self.deform_fc_channels = deform_fc_channels - - self.offset_fc = nn.Sequential( - nn.Linear( - self.output_size[0] * self.output_size[1] * - self.output_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, - self.output_size[0] * self.output_size[1] * 2)) - self.offset_fc[-1].weight.data.zero_() - self.offset_fc[-1].bias.data.zero_() - - def forward(self, input, rois): - assert input.size(1) == self.output_channels - x = deform_roi_pool(input, rois, None, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - rois_num = rois.size(0) - offset = self.offset_fc(x.view(rois_num, -1)) - offset = offset.view(rois_num, 2, self.output_size[0], - self.output_size[1]) - return deform_roi_pool(input, rois, offset, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - - -class ModulatedDeformRoIPoolPack(DeformRoIPool): - - def __init__(self, - output_size, - output_channels, - deform_fc_channels=1024, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - super(ModulatedDeformRoIPoolPack, - self).__init__(output_size, spatial_scale, sampling_ratio, gamma) - - self.output_channels = output_channels - self.deform_fc_channels = deform_fc_channels - - self.offset_fc = nn.Sequential( - nn.Linear( - self.output_size[0] * self.output_size[1] * - self.output_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, - 
self.output_size[0] * self.output_size[1] * 2)) - self.offset_fc[-1].weight.data.zero_() - self.offset_fc[-1].bias.data.zero_() - - self.mask_fc = nn.Sequential( - nn.Linear( - self.output_size[0] * self.output_size[1] * - self.output_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, - self.output_size[0] * self.output_size[1] * 1), - nn.Sigmoid()) - self.mask_fc[2].weight.data.zero_() - self.mask_fc[2].bias.data.zero_() - - def forward(self, input, rois): - assert input.size(1) == self.output_channels - x = deform_roi_pool(input, rois, None, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - rois_num = rois.size(0) - offset = self.offset_fc(x.view(rois_num, -1)) - offset = offset.view(rois_num, 2, self.output_size[0], - self.output_size[1]) - mask = self.mask_fc(x.view(rois_num, -1)) - mask = mask.view(rois_num, 1, self.output_size[0], self.output_size[1]) - d = deform_roi_pool(input, rois, offset, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - return d * mask diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/utils/parrots_wrapper.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/utils/parrots_wrapper.py deleted file mode 100644 index 93c97640d4b9ed088ca82cfe03e6efebfcfa9dbf..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/utils/parrots_wrapper.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from functools import partial - -import torch - -TORCH_VERSION = torch.__version__ - - -def is_rocm_pytorch() -> bool: - is_rocm = False - if TORCH_VERSION != 'parrots': - try: - from torch.utils.cpp_extension import ROCM_HOME - is_rocm = True if ((torch.version.hip is not None) and - (ROCM_HOME is not None)) else False - except ImportError: - pass - return is_rocm - - -def _get_cuda_home(): - if TORCH_VERSION == 'parrots': - from parrots.utils.build_extension import CUDA_HOME - else: - if is_rocm_pytorch(): - from torch.utils.cpp_extension import ROCM_HOME - CUDA_HOME = ROCM_HOME - else: - from torch.utils.cpp_extension import CUDA_HOME - return CUDA_HOME - - -def get_build_config(): - if TORCH_VERSION == 'parrots': - from parrots.config import get_build_info - return get_build_info() - else: - return torch.__config__.show() - - -def _get_conv(): - if TORCH_VERSION == 'parrots': - from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin - else: - from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin - return _ConvNd, _ConvTransposeMixin - - -def _get_dataloader(): - if TORCH_VERSION == 'parrots': - from torch.utils.data import DataLoader, PoolDataLoader - else: - from torch.utils.data import DataLoader - PoolDataLoader = DataLoader - return DataLoader, PoolDataLoader - - -def _get_extension(): - if TORCH_VERSION == 'parrots': - from parrots.utils.build_extension import BuildExtension, Extension - CppExtension = partial(Extension, cuda=False) - CUDAExtension = partial(Extension, cuda=True) - else: - from torch.utils.cpp_extension import (BuildExtension, CppExtension, - CUDAExtension) - return BuildExtension, CppExtension, CUDAExtension - - -def _get_pool(): - if TORCH_VERSION == 'parrots': - from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd, - _AdaptiveMaxPoolNd, _AvgPoolNd, - _MaxPoolNd) - else: - from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd, - _AdaptiveMaxPoolNd, _AvgPoolNd, - _MaxPoolNd) - return _AdaptiveAvgPoolNd, 
_AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd - - -def _get_norm(): - if TORCH_VERSION == 'parrots': - from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm - SyncBatchNorm_ = torch.nn.SyncBatchNorm2d - else: - from torch.nn.modules.instancenorm import _InstanceNorm - from torch.nn.modules.batchnorm import _BatchNorm - SyncBatchNorm_ = torch.nn.SyncBatchNorm - return _BatchNorm, _InstanceNorm, SyncBatchNorm_ - - -_ConvNd, _ConvTransposeMixin = _get_conv() -DataLoader, PoolDataLoader = _get_dataloader() -BuildExtension, CppExtension, CUDAExtension = _get_extension() -_BatchNorm, _InstanceNorm, SyncBatchNorm_ = _get_norm() -_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd = _get_pool() - - -class SyncBatchNorm(SyncBatchNorm_): - - def _check_input_dim(self, input): - if TORCH_VERSION == 'parrots': - if input.dim() < 2: - raise ValueError( - f'expected at least 2D input (got {input.dim()}D input)') - else: - super()._check_input_dim(input) diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/datasets/ade.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/datasets/ade.py deleted file mode 100644 index 5913e43775ed4920b6934c855eb5a37c54218ebf..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/datasets/ade.py +++ /dev/null @@ -1,84 +0,0 @@ -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class ADE20KDataset(CustomDataset): - """ADE20K dataset. - - In segmentation map annotation for ADE20K, 0 stands for background, which - is not included in 150 categories. ``reduce_zero_label`` is fixed to True. - The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to - '.png'. - """ - CLASSES = ( - 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', - 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', - 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', - 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', - 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', - 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', - 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', - 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', - 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', - 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', - 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', - 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', - 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', - 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', - 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', - 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', - 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', - 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', - 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', - 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', - 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', - 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', - 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', - 'clock', 'flag') - - PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], - [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], - 
[230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], - [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], - [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], - [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], - [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], - [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], - [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], - [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], - [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], - [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], - [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], - [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], - [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], - [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], - [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], - [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], - [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], - [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], - [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], - [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], - [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], - [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], - [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], - [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], - [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], - [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], - [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], - [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], - [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], - [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], - [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], - [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], - [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], - [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], - [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], - [102, 255, 0], [92, 0, 255]] - - def __init__(self, **kwargs): - super(ADE20KDataset, self).__init__( - img_suffix='.jpg', - seg_map_suffix='.png', - reduce_zero_label=True, - **kwargs) diff --git a/spaces/willgibs/ControlNet-v1-1/app_mlsd.py b/spaces/willgibs/ControlNet-v1-1/app_mlsd.py deleted file mode 100644 index 073b0da202362716c6af5da7cb929981c78f7f20..0000000000000000000000000000000000000000 --- a/spaces/willgibs/ControlNet-v1-1/app_mlsd.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python - -import gradio as gr - -from utils import randomize_seed_fn - - -def create_demo(process, max_images=12, default_num_images=3): - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - image = gr.Image() - prompt = gr.Textbox(label='Prompt') - run_button = gr.Button('Run') - with gr.Accordion('Advanced options', open=False): - num_samples = gr.Slider(label='Number of images', - minimum=1, - maximum=max_images, - value=default_num_images, - step=1) - image_resolution = gr.Slider(label='Image resolution', - minimum=256, - maximum=512, - value=512, - step=256) - preprocess_resolution = gr.Slider( - label='Preprocess resolution', - minimum=128, - maximum=512, - value=512, - step=1) - mlsd_value_threshold = gr.Slider( - label='Hough value threshold (MLSD)', - minimum=0.01, - maximum=2.0, - value=0.1, - step=0.01) - mlsd_distance_threshold = gr.Slider( - label='Hough distance threshold 
(MLSD)', - minimum=0.01, - maximum=20.0, - value=0.1, - step=0.01) - num_steps = gr.Slider(label='Number of steps', - minimum=1, - maximum=100, - value=20, - step=1) - guidance_scale = gr.Slider(label='Guidance scale', - minimum=0.1, - maximum=30.0, - value=9.0, - step=0.1) - seed = gr.Slider(label='Seed', - minimum=0, - maximum=1000000, - step=1, - value=0, - randomize=True) - randomize_seed = gr.Checkbox(label='Randomize seed', - value=True) - a_prompt = gr.Textbox( - label='Additional prompt', - value='best quality, extremely detailed') - n_prompt = gr.Textbox( - label='Negative prompt', - value= - 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality' - ) - with gr.Column(): - result = gr.Gallery(label='Output', show_label=False).style( - columns=2, object_fit='scale-down') - inputs = [ - image, - prompt, - a_prompt, - n_prompt, - num_samples, - image_resolution, - preprocess_resolution, - num_steps, - guidance_scale, - seed, - mlsd_value_threshold, - mlsd_distance_threshold, - ] - prompt.submit( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - queue=False, - ).then( - fn=process, - inputs=inputs, - outputs=result, - ) - run_button.click( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - queue=False, - ).then( - fn=process, - inputs=inputs, - outputs=result, - api_name='mlsd', - ) - return demo - - -if __name__ == '__main__': - from model import Model - model = Model(task_name='MLSD') - demo = create_demo(model.process_mlsd) - demo.queue().launch() diff --git a/spaces/wydgg/bingo-wyd-ai/src/components/ui/voice/index.tsx b/spaces/wydgg/bingo-wyd-ai/src/components/ui/voice/index.tsx deleted file mode 100644 index 4adcb632226bfced8b97092782811edf08b56569..0000000000000000000000000000000000000000 --- a/spaces/wydgg/bingo-wyd-ai/src/components/ui/voice/index.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import './index.scss' - -export interface VoiceProps extends CSSPropertyRule { - num?: number; - duration?: number; -} -export default function Voice({ duration = 400, num = 7, ...others }) { - return ( -
                - {Array.from({ length: num }).map((_, index) => { - const randomDuration = Math.random() * 100 + duration - const initialDelay = Math.random() * 2 * duration - const initialScale = Math.sin((index + 1) * Math.PI / num) - return ( -
                - ) - })} -
                - ) -} diff --git a/spaces/wydgg/bingo-wyd-ai/src/components/user-menu.tsx b/spaces/wydgg/bingo-wyd-ai/src/components/user-menu.tsx deleted file mode 100644 index 9bd1edc9cf9f39b63629b021f0c1186b1a7c1341..0000000000000000000000000000000000000000 --- a/spaces/wydgg/bingo-wyd-ai/src/components/user-menu.tsx +++ /dev/null @@ -1,113 +0,0 @@ -'use client' - -import { useEffect, useState } from 'react' -import Image from 'next/image' -import { toast } from 'react-hot-toast' -import { Button } from '@/components/ui/button' -import pkg from '../../package.json' -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuSeparator, - DropdownMenuTrigger -} from '@/components/ui/dropdown-menu' -import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons' -import SettingIcon from '@/assets/images/settings.svg' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - -export function UserMenu() { - const [host, setHost] = useState('') - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - useEffect(() => { - setHost(location.host) - }, []) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - return ( -
                - - - - - - - location.href='#dialog="settings"' - } - className="cursor-pointer" - > - 设置用户 - - - - location.href='#dialog="voice"' - } - className="cursor-pointer" - > - 语音设置 - - - - - 开源地址 - - - - - - - - 托管地址 - 🤗 - - - - - - - 复制站点 - - - - - -
                版本信息 {pkg.version}
                -
                - - -
                站点域名
                -
                copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer"> - {host} -
                -
                -
                -
                -
                - ) -} diff --git a/spaces/wzq10314/VITS-Umamusume-voice-synthesizer1/README.md b/spaces/wzq10314/VITS-Umamusume-voice-synthesizer1/README.md deleted file mode 100644 index 1b24e6efdb04cb1460e4fe3257d2303677c5a0e1..0000000000000000000000000000000000000000 --- a/spaces/wzq10314/VITS-Umamusume-voice-synthesizer1/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Multilingual Anime TTS -emoji: 🎙🐴 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.7 -app_file: app.py -pinned: false -duplicated_from: Plachta/VITS-Umamusume-voice-synthesizer ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xiang-wuu/yolov5/utils/plots.py b/spaces/xiang-wuu/yolov5/utils/plots.py deleted file mode 100644 index 53e326c23f6e5b2b7bdc44e06c5da2dcf6fcebc7..0000000000000000000000000000000000000000 --- a/spaces/xiang-wuu/yolov5/utils/plots.py +++ /dev/null @@ -1,489 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Plotting utils -""" - -import math -import os -from copy import copy -from pathlib import Path -from urllib.error import URLError - -import cv2 -import matplotlib -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sn -import torch -from PIL import Image, ImageDraw, ImageFont - -from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, - increment_path, is_ascii, threaded, try_except, xywh2xyxy, xyxy2xywh) -from utils.metrics import fitness - -# Settings -RANK = int(os.getenv('RANK', -1)) -matplotlib.rc('font', **{'size': 11}) -matplotlib.use('Agg') # for writing to files only - - -class Colors: - # Ultralytics color palette https://ultralytics.com/ - def __init__(self): - # hex = matplotlib.colors.TABLEAU_COLORS.values() - hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', - '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - self.palette = [self.hex2rgb(f'#{c}') for c in hexs] - self.n = len(self.palette) - - def __call__(self, i, bgr=False): - c = self.palette[int(i) % self.n] - return (c[2], c[1], c[0]) if bgr else c - - @staticmethod - def hex2rgb(h): # rgb order (PIL) - return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) - - -colors = Colors() # create instance for 'from utils.plots import colors' - - -def check_pil_font(font=FONT, size=10): - # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary - font = Path(font) - font = font if font.exists() else (CONFIG_DIR / font.name) - try: - return ImageFont.truetype(str(font) if font.exists() else font.name, size) - except Exception: # download if missing - try: - check_font(font) - return ImageFont.truetype(str(font), size) - except TypeError: - check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 - except URLError: # not online - return ImageFont.load_default() - - -class Annotator: - # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' - non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic - self.pil = pil or non_ascii - if self.pil: # use PIL - self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) - self.draw = ImageDraw.Draw(self.im) - self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, - size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) - else: # use cv2 - self.im = im - self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width - - def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): - # Add one xyxy box to image with label - if self.pil or not is_ascii(label): - self.draw.rectangle(box, width=self.lw, outline=color) # box - if label: - w, h = self.font.getsize(label) # text width, height - outside = box[1] - h >= 0 # label fits outside box - self.draw.rectangle( - (box[0], box[1] - h if outside else box[1], box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1), - fill=color, - ) - # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 - self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) - else: # cv2 - p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) - if label: - tf = max(self.lw - 1, 1) # font thickness - w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height - outside = p1[1] - h >= 3 - p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 - cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, - label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), - 0, - self.lw / 3, - txt_color, - thickness=tf, - lineType=cv2.LINE_AA) - - def rectangle(self, xy, fill=None, outline=None, width=1): - # Add rectangle to image (PIL-only) - self.draw.rectangle(xy, fill, outline, width) - - def text(self, xy, text, txt_color=(255, 255, 255)): - # Add text to image (PIL-only) - w, h = self.font.getsize(text) # text width, height - self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) - - def result(self): - # Return annotated image as array - return np.asarray(self.im) - - -def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): - """ - x: Features to be visualized - module_type: Module type - stage: Module stage within model - n: Maximum number of feature maps to plot - save_dir: Directory to save results - """ - if 'Detect' not in module_type: - batch, channels, height, width = x.shape # batch, channels, height, width - if height > 1 and width > 1: - f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename - - blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels - n = min(n, channels) # number of plots - fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols - ax = ax.ravel() - plt.subplots_adjust(wspace=0.05, hspace=0.05) - for i in range(n): - ax[i].imshow(blocks[i].squeeze()) # cmap='gray' - ax[i].axis('off') - - LOGGER.info(f'Saving {f}... 
({n}/{channels})') - plt.savefig(f, dpi=300, bbox_inches='tight') - plt.close() - np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save - - -def hist2d(x, y, n=100): - # 2d histogram used in labels.png and evolve.png - xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) - hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) - xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) - yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) - return np.log(hist[xidx, yidx]) - - -def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): - from scipy.signal import butter, filtfilt - - # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy - def butter_lowpass(cutoff, fs, order): - nyq = 0.5 * fs - normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) - - b, a = butter_lowpass(cutoff, fs, order=order) - return filtfilt(b, a, data) # forward-backward filter - - -def output_to_target(output): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] - targets = [] - for i, o in enumerate(output): - for *box, conf, cls in o.cpu().numpy(): - targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) - return np.array(targets) - - -@threaded -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): - # Plot image grid with labels - if isinstance(images, torch.Tensor): - images = images.cpu().float().numpy() - if isinstance(targets, torch.Tensor): - targets = targets.cpu().numpy() - if np.max(images[0]) <= 1: - images *= 255 # de-normalise (optional) - bs, _, h, w = images.shape # batch size, _, height, width - bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) - - # Build Image - mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, im in enumerate(images): - if i == max_subplots: # if last batch has fewer images than we expect - break - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im - - # Resize (optional) - scale = max_size / ns / max(h, w) - if scale < 1: - h = math.ceil(scale * h) - w = math.ceil(scale * w) - mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) - - # Annotate - fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(i + 1): - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders - if paths: - annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames - if len(targets) > 0: - ti = targets[targets[:, 0] == i] # image targets - boxes = xywh2xyxy(ti[:, 2:6]).T - classes = ti[:, 1].astype('int') - labels = ti.shape[1] == 6 # labels if no conf column - conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) - - if boxes.shape[1]: - if boxes.max() <= 1.01: # if normalized with tolerance 0.01 - boxes[[0, 2]] *= w # scale to pixels - boxes[[1, 3]] *= h - elif scale < 1: # absolute coords need scale if image scales - boxes *= scale - boxes[[0, 2]] += x - boxes[[1, 3]] += y - for j, box in enumerate(boxes.T.tolist()): - cls = classes[j] - color = colors(cls) - cls = names[cls] if names else cls - if labels or conf[j] > 0.25: # 
0.25 conf thresh - label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' - annotator.box_label(box, label, color=color) - annotator.im.save(fname) # save - - -def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): - # Plot LR simulating training for full epochs - optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals - y = [] - for _ in range(epochs): - scheduler.step() - y.append(optimizer.param_groups[0]['lr']) - plt.plot(y, '.-', label='LR') - plt.xlabel('epoch') - plt.ylabel('LR') - plt.grid() - plt.xlim(0, epochs) - plt.ylim(0) - plt.savefig(Path(save_dir) / 'LR.png', dpi=200) - plt.close() - - -def plot_val_txt(): # from utils.plots import *; plot_val() - # Plot val.txt histograms - x = np.loadtxt('val.txt', dtype=np.float32) - box = xyxy2xywh(x[:, :4]) - cx, cy = box[:, 0], box[:, 1] - - fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) - ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) - ax.set_aspect('equal') - plt.savefig('hist2d.png', dpi=300) - - fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) - ax[0].hist(cx, bins=600) - ax[1].hist(cy, bins=600) - plt.savefig('hist1d.png', dpi=200) - - -def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() - # Plot targets.txt histograms - x = np.loadtxt('targets.txt', dtype=np.float32).T - s = ['x targets', 'y targets', 'width targets', 'height targets'] - fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) - ax = ax.ravel() - for i in range(4): - ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') - ax[i].legend() - ax[i].set_title(s[i]) - plt.savefig('targets.jpg', dpi=200) - - -def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() - # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) - save_dir = Path(file).parent if file else Path(dir) - plot2 = False # plot additional results - if plot2: - ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() - - fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) - # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: - for f in sorted(save_dir.glob('study*.txt')): - y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T - x = np.arange(y.shape[1]) if x is None else np.array(x) - if plot2: - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] - for i in range(7): - ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - ax[i].set_title(s[i]) - - j = y[3].argmax() + 1 - ax2.plot(y[5, 1:j], - y[3, 1:j] * 1E2, - '.-', - linewidth=2, - markersize=8, - label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) - - ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], - 'k.-', - linewidth=2, - markersize=8, - alpha=.25, - label='EfficientDet') - - ax2.grid(alpha=0.2) - ax2.set_yticks(np.arange(20, 60, 5)) - ax2.set_xlim(0, 57) - ax2.set_ylim(25, 55) - ax2.set_xlabel('GPU Speed (ms/img)') - ax2.set_ylabel('COCO AP val') - ax2.legend(loc='lower right') - f = save_dir / 'study.png' - print(f'Saving {f}...') - plt.savefig(f, dpi=300) - - -@try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 -@Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 -def plot_labels(labels, names=(), save_dir=Path('')): - # plot dataset labels - LOGGER.info(f"Plotting labels to {save_dir / 
'labels.jpg'}... ") - c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes - nc = int(c.max() + 1) # number of classes - x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) - - # seaborn correlogram - sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) - plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) - plt.close() - - # matplotlib labels - matplotlib.use('svg') # faster - ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() - y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - try: # color histogram bars by class - [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 - except Exception: - pass - ax[0].set_ylabel('instances') - if 0 < len(names) < 30: - ax[0].set_xticks(range(len(names))) - ax[0].set_xticklabels(names, rotation=90, fontsize=10) - else: - ax[0].set_xlabel('classes') - sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) - - # rectangles - labels[:, 1:3] = 0.5 # center - labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 - img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) - for cls, *box in labels[:1000]: - ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot - ax[1].imshow(img) - ax[1].axis('off') - - for a in [0, 1, 2, 3]: - for s in ['top', 'right', 'left', 'bottom']: - ax[a].spines[s].set_visible(False) - - plt.savefig(save_dir / 'labels.jpg', dpi=200) - matplotlib.use('Agg') - plt.close() - - -def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() - # Plot evolve.csv hyp evolution results - evolve_csv = Path(evolve_csv) - data = pd.read_csv(evolve_csv) - keys = [x.strip() for x in data.columns] - x = data.values - f = fitness(x) - j = np.argmax(f) # max fitness index - plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) - print(f'Best results from row {j} of {evolve_csv}:') - for i, k in enumerate(keys[7:]): - v = x[:, 7 + i] - mu = v[j] # best single result - plt.subplot(6, 5, i + 1) - plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters - if i % 5 != 0: - plt.yticks([]) - print(f'{k:>15}: {mu:.3g}') - f = evolve_csv.with_suffix('.png') # filename - plt.savefig(f, dpi=200) - plt.close() - print(f'Saved {f}') - - -def plot_results(file='path/to/results.csv', dir=''): - # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') - save_dir = Path(file).parent if file else Path(dir) - fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) - ax = ax.ravel() - files = list(save_dir.glob('results*.csv')) - assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
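-    # each results*.csv contributes one curve per panel; the column order below maps metrics onto the 2x5 grid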
- for f in files: - try: - data = pd.read_csv(f) - s = [x.strip() for x in data.columns] - x = data.values[:, 0] - for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): - y = data.values[:, j].astype('float') - # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) - ax[i].set_title(s[j], fontsize=12) - # if j in [8, 9, 10]: # share train and val loss y axes - # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) - except Exception as e: - LOGGER.info(f'Warning: Plotting error for {f}: {e}') - ax[1].legend() - fig.savefig(save_dir / 'results.png', dpi=200) - plt.close() - - -def profile_idetection(start=0, stop=0, labels=(), save_dir=''): - # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() - ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() - s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] - files = list(Path(save_dir).glob('frames*.txt')) - for fi, f in enumerate(files): - try: - results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows - n = results.shape[1] # number of rows - x = np.arange(start, min(stop, n) if stop else n) - results = results[:, x] - t = (results[0] - results[0].min()) # set t0=0s - results[0] = x - for i, a in enumerate(ax): - if i < len(results): - label = labels[fi] if len(labels) else f.stem.replace('frames_', '') - a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) - a.set_title(s[i]) - a.set_xlabel('time (s)') - # if fi == len(files) - 1: - # a.set_ylim(bottom=0) - for side in ['top', 'right']: - a.spines[side].set_visible(False) - else: - a.remove() - except Exception as e: - print(f'Warning: Plotting error for {f}; {e}') - ax[1].legend() - plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) - - -def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): - # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop
-    xyxy = torch.tensor(xyxy).view(-1, 4)
-    b = xyxy2xywh(xyxy)  # boxes
-    if square:
-        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
-    b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad
-    xyxy = xywh2xyxy(b).long()
-    clip_coords(xyxy, im.shape)
-    crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
-    if save:
-        file.parent.mkdir(parents=True, exist_ok=True)  # make directory
-        f = str(increment_path(file).with_suffix('.jpg'))
-        # cv2.imwrite(f, crop)  # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue
-        Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0)  # save RGB
-    return crop
diff --git a/spaces/xiaoyun235/White-box-Cartoonization/README.md b/spaces/xiaoyun235/White-box-Cartoonization/README.md
deleted file mode 100644
index 9860239cf42c94e385faaaa75a85311e010d64f7..0000000000000000000000000000000000000000
--- a/spaces/xiaoyun235/White-box-Cartoonization/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-python_version: 3.7
-title: White Box Cartoonization
-emoji: 📚
-colorFrom: purple
-colorTo: green
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: hylee/White-box-Cartoonization
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/xswu/HPSv2/src/training/precision.py b/spaces/xswu/HPSv2/src/training/precision.py
deleted file mode 100644
index a63b92256518d13afd57261df1568e26b1622201..0000000000000000000000000000000000000000
--- a/spaces/xswu/HPSv2/src/training/precision.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import torch
-from contextlib import suppress
-
-
-def get_autocast(precision):
-    if precision == 'amp':
-        return torch.cuda.amp.autocast
-    elif precision == 'amp_bfloat16' or precision == 'amp_bf16':
-        # amp_bfloat16 is more stable than amp float16 for clip training
-        return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
-    else:
-        return suppress
diff --git a/spaces/ybelkada/FocusOnDepth/focusondepth/__init__.py b/spaces/ybelkada/FocusOnDepth/focusondepth/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/TempoGraph/transformEvents.ts b/spaces/yderre-aubay/midi-player-demo/src/main/components/TempoGraph/transformEvents.ts
deleted file mode 100644
index 1a3cffcff63d256519566577e33e5413a1413839..0000000000000000000000000000000000000000
--- a/spaces/yderre-aubay/midi-player-demo/src/main/components/TempoGraph/transformEvents.ts
+++ /dev/null
@@ -1,44 +0,0 @@
-import { SetTempoEvent } from "midifile-ts"
-import { TrackEvent } from "../../../common/track"
-import { TempoCoordTransform } from "../../../common/transform"
-import { TempoGraphItem } from "./TempoGraphItem"
-
-const isSetTempoEvent = (e: any): e is TrackEvent & SetTempoEvent =>
-  e.subtype == "setTempo"
-
-export const transformEvents = (
-  events: TrackEvent[],
-  transform: TempoCoordTransform,
-  maxX: number,
-): TempoGraphItem[] => {
-  // Calculate only position
-  const items = events
-    .filter(isSetTempoEvent)
-    .sort((a, b) => a.tick - b.tick)
-    .map((e) => {
-      const bpm = (60 * 1000000) / e.microsecondsPerBeat
-      return {
-        id: e.id,
-        x: Math.round(transform.getX(e.tick)),
-        y: Math.round(transform.getY(bpm)),
-        microsecondsPerBeat: e.microsecondsPerBeat,
-      }
-    })
-
-
// Set size to extend to the next event position - return items.map((e, i) => { - const nextX = i + 1 < items.length ? items[i + 1].x : maxX - return { - id: e.id, - bounds: { - x: e.x, - y: e.y, - width: nextX - e.x, - height: transform.height - e.y + 1, // fit to screen bottom - }, - microsecondsPerBeat: e.microsecondsPerBeat, - } - }) -} diff --git a/spaces/yerfor/SyntaSpeech/utils/text/text_encoder.py b/spaces/yerfor/SyntaSpeech/utils/text/text_encoder.py deleted file mode 100644 index 09555af09720382a795712f0fdd9b711c5b19e02..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/utils/text/text_encoder.py +++ /dev/null @@ -1,263 +0,0 @@ -import json -import re -import six -from six.moves import range # pylint: disable=redefined-builtin - -PAD = "" -EOS = "" -UNK = "" -SEG = "|" -PUNCS = '!,.?;:' -RESERVED_TOKENS = [PAD, EOS, UNK] -NUM_RESERVED_TOKENS = len(RESERVED_TOKENS) -PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0 -EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1 -UNK_ID = RESERVED_TOKENS.index(UNK) # Normally 2 - -if six.PY2: - RESERVED_TOKENS_BYTES = RESERVED_TOKENS -else: - RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")] - -# Regular expression for unescaping token strings. -# '\u' is converted to '_' -# '\\' is converted to '\' -# '\213;' is converted to unichr(213) -_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);") -_ESCAPE_CHARS = set(u"\\_u;0123456789") - - -def strip_ids(ids, ids_to_strip): - """Strip ids_to_strip from the end ids.""" - ids = list(ids) - while ids and ids[-1] in ids_to_strip: - ids.pop() - return ids - - -class TextEncoder(object): - """Base class for converting from ints to/from human readable strings.""" - - def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS): - self._num_reserved_ids = num_reserved_ids - - @property - def num_reserved_ids(self): - return self._num_reserved_ids - - def encode(self, s): - """Transform a human-readable string into a sequence of int ids. - - The ids should be in the range [num_reserved_ids, vocab_size). Ids [0, - num_reserved_ids) are reserved. - - EOS is not appended. - - Args: - s: human-readable string to be converted. - - Returns: - ids: list of integers - """ - return [int(w) + self._num_reserved_ids for w in s.split()] - - def decode(self, ids, strip_extraneous=False): - """Transform a sequence of int ids into a human-readable string. - - EOS is not expected in ids. - - Args: - ids: list of integers to be converted. - strip_extraneous: bool, whether to strip off extraneous tokens - (EOS and PAD). - - Returns: - s: human-readable string. - """ - if strip_extraneous: - ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) - return " ".join(self.decode_list(ids)) - - def decode_list(self, ids): - """Transform a sequence of int ids into a their string versions. - - This method supports transforming individual input/output ids to their - string versions so that sequence to/from text conversions can be visualized - in a human readable format. - - Args: - ids: list of integers to be converted. - - Returns: - strs: list of human-readable string. 
- """ - decoded_ids = [] - for id_ in ids: - if 0 <= id_ < self._num_reserved_ids: - decoded_ids.append(RESERVED_TOKENS[int(id_)]) - else: - decoded_ids.append(id_ - self._num_reserved_ids) - return [str(d) for d in decoded_ids] - - @property - def vocab_size(self): - raise NotImplementedError() - - -class TokenTextEncoder(TextEncoder): - """Encoder based on a user-supplied vocabulary (file or list).""" - - def __init__(self, - vocab_filename, - reverse=False, - vocab_list=None, - replace_oov=None, - num_reserved_ids=NUM_RESERVED_TOKENS): - """Initialize from a file or list, one token per line. - - Handling of reserved tokens works as follows: - - When initializing from a list, we add reserved tokens to the vocab. - - When initializing from a file, we do not add reserved tokens to the vocab. - - When saving vocab files, we save reserved tokens to the file. - - Args: - vocab_filename: If not None, the full filename to read vocab from. If this - is not None, then vocab_list should be None. - reverse: Boolean indicating if tokens should be reversed during encoding - and decoding. - vocab_list: If not None, a list of elements of the vocabulary. If this is - not None, then vocab_filename should be None. - replace_oov: If not None, every out-of-vocabulary token seen when - encoding will be replaced by this string (which must be in vocab). - num_reserved_ids: Number of IDs to save for reserved tokens like . - """ - super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids) - self._reverse = reverse - self._replace_oov = replace_oov - if vocab_filename: - self._init_vocab_from_file(vocab_filename) - else: - assert vocab_list is not None - self._init_vocab_from_list(vocab_list) - self.pad_index = self.token_to_id[PAD] - self.eos_index = self.token_to_id[EOS] - self.unk_index = self.token_to_id[UNK] - self.seg_index = self.token_to_id[SEG] if SEG in self.token_to_id else self.eos_index - - def encode(self, s): - """Converts a space-separated string of tokens to a list of ids.""" - sentence = s - tokens = sentence.strip().split() - if self._replace_oov is not None: - tokens = [t if t in self.token_to_id else self._replace_oov - for t in tokens] - ret = [self.token_to_id[tok] for tok in tokens] - return ret[::-1] if self._reverse else ret - - def decode(self, ids, strip_eos=False, strip_padding=False): - if strip_padding and self.pad() in list(ids): - pad_pos = list(ids).index(self.pad()) - ids = ids[:pad_pos] - if strip_eos and self.eos() in list(ids): - eos_pos = list(ids).index(self.eos()) - ids = ids[:eos_pos] - return " ".join(self.decode_list(ids)) - - def decode_list(self, ids): - seq = reversed(ids) if self._reverse else ids - return [self._safe_id_to_token(i) for i in seq] - - @property - def vocab_size(self): - return len(self.id_to_token) - - def __len__(self): - return self.vocab_size - - def _safe_id_to_token(self, idx): - return self.id_to_token.get(idx, "ID_%d" % idx) - - def _init_vocab_from_file(self, filename): - """Load vocab from a file. - - Args: - filename: The file to load vocabulary from. - """ - with open(filename) as f: - tokens = [token.strip() for token in f.readlines()] - - def token_gen(): - for token in tokens: - yield token - - self._init_vocab(token_gen(), add_reserved_tokens=False) - - def _init_vocab_from_list(self, vocab_list): - """Initialize tokens from a list of tokens. - - It is ok if reserved tokens appear in the vocab list. They will be - removed. The set of tokens in vocab_list should be unique. - - Args: - vocab_list: A list of tokens. 
- """ - - def token_gen(): - for token in vocab_list: - if token not in RESERVED_TOKENS: - yield token - - self._init_vocab(token_gen()) - - def _init_vocab(self, token_generator, add_reserved_tokens=True): - """Initialize vocabulary with tokens from token_generator.""" - - self.id_to_token = {} - non_reserved_start_index = 0 - - if add_reserved_tokens: - self.id_to_token.update(enumerate(RESERVED_TOKENS)) - non_reserved_start_index = len(RESERVED_TOKENS) - - self.id_to_token.update( - enumerate(token_generator, start=non_reserved_start_index)) - - # _token_to_id is the reverse of _id_to_token - self.token_to_id = dict((v, k) for k, v in six.iteritems(self.id_to_token)) - - def pad(self): - return self.pad_index - - def eos(self): - return self.eos_index - - def unk(self): - return self.unk_index - - def seg(self): - return self.seg_index - - def store_to_file(self, filename): - """Write vocab file to disk. - - Vocab files have one token per line. The file ends in a newline. Reserved - tokens are written to the vocab file as well. - - Args: - filename: Full path of the file to store the vocab to. - """ - with open(filename, "w") as f: - for i in range(len(self.id_to_token)): - f.write(self.id_to_token[i] + "\n") - - def sil_phonemes(self): - return [p for p in self.id_to_token.values() if is_sil_phoneme(p)] - - -def build_token_encoder(token_list_file): - token_list = json.load(open(token_list_file)) - return TokenTextEncoder(None, vocab_list=token_list, replace_oov='') - - -def is_sil_phoneme(p): - return p == '' or not p[0].isalpha() diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/data/processors/utils.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/data/processors/utils.py deleted file mode 100644 index 936f5a51e9fcf4c4189eb444e567d761e8fa0865..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/data/processors/utils.py +++ /dev/null @@ -1,349 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import csv -import dataclasses -import json -from dataclasses import dataclass -from typing import List, Optional, Union - -from ...utils import is_tf_available, is_torch_available, logging - - -logger = logging.get_logger(__name__) - - -@dataclass -class InputExample: - """ - A single training/test example for simple sequence classification. - - Args: - guid: Unique id for the example. - text_a: string. The untokenized text of the first sequence. For single - sequence tasks, only this sequence must be specified. - text_b: (Optional) string. The untokenized text of the second sequence. - Only must be specified for sequence pair tasks. - label: (Optional) string. The label of the example. This should be - specified for train and dev examples, but not for test examples. 
- """ - - guid: str - text_a: str - text_b: Optional[str] = None - label: Optional[str] = None - - def to_json_string(self): - """Serializes this instance to a JSON string.""" - return json.dumps(dataclasses.asdict(self), indent=2) + "\n" - - -@dataclass(frozen=True) -class InputFeatures: - """ - A single set of features of data. Property names are the same names as the corresponding inputs to a model. - - Args: - input_ids: Indices of input sequence tokens in the vocabulary. - attention_mask: Mask to avoid performing attention on padding token indices. - Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded) - tokens. - token_type_ids: (Optional) Segment token indices to indicate first and second - portions of the inputs. Only some models use them. - label: (Optional) Label corresponding to the input. Int for classification problems, - float for regression problems. - """ - - input_ids: List[int] - attention_mask: Optional[List[int]] = None - token_type_ids: Optional[List[int]] = None - label: Optional[Union[int, float]] = None - - def to_json_string(self): - """Serializes this instance to a JSON string.""" - return json.dumps(dataclasses.asdict(self)) + "\n" - - -class DataProcessor: - """Base class for data converters for sequence classification data sets.""" - - def get_example_from_tensor_dict(self, tensor_dict): - """ - Gets an example from a dict with tensorflow tensors. - - Args: - tensor_dict: Keys and values should match the corresponding Glue - tensorflow_dataset examples. - """ - raise NotImplementedError() - - def get_train_examples(self, data_dir): - """Gets a collection of [`InputExample`] for the train set.""" - raise NotImplementedError() - - def get_dev_examples(self, data_dir): - """Gets a collection of [`InputExample`] for the dev set.""" - raise NotImplementedError() - - def get_test_examples(self, data_dir): - """Gets a collection of [`InputExample`] for the test set.""" - raise NotImplementedError() - - def get_labels(self): - """Gets the list of labels for this data set.""" - raise NotImplementedError() - - def tfds_map(self, example): - """ - Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts - examples to the correct format. 
- """ - if len(self.get_labels()) > 1: - example.label = self.get_labels()[int(example.label)] - return example - - @classmethod - def _read_tsv(cls, input_file, quotechar=None): - """Reads a tab separated value file.""" - with open(input_file, "r", encoding="utf-8-sig") as f: - return list(csv.reader(f, delimiter="\t", quotechar=quotechar)) - - -class SingleSentenceClassificationProcessor(DataProcessor): - """Generic processor for a single sentence classification data set.""" - - def __init__(self, labels=None, examples=None, mode="classification", verbose=False): - self.labels = [] if labels is None else labels - self.examples = [] if examples is None else examples - self.mode = mode - self.verbose = verbose - - def __len__(self): - return len(self.examples) - - def __getitem__(self, idx): - if isinstance(idx, slice): - return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx]) - return self.examples[idx] - - @classmethod - def create_from_csv( - cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs - ): - processor = cls(**kwargs) - processor.add_examples_from_csv( - file_name, - split_name=split_name, - column_label=column_label, - column_text=column_text, - column_id=column_id, - skip_first_row=skip_first_row, - overwrite_labels=True, - overwrite_examples=True, - ) - return processor - - @classmethod - def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs): - processor = cls(**kwargs) - processor.add_examples(texts_or_text_and_labels, labels=labels) - return processor - - def add_examples_from_csv( - self, - file_name, - split_name="", - column_label=0, - column_text=1, - column_id=None, - skip_first_row=False, - overwrite_labels=False, - overwrite_examples=False, - ): - lines = self._read_tsv(file_name) - if skip_first_row: - lines = lines[1:] - texts = [] - labels = [] - ids = [] - for i, line in enumerate(lines): - texts.append(line[column_text]) - labels.append(line[column_label]) - if column_id is not None: - ids.append(line[column_id]) - else: - guid = f"{split_name}-{i}" if split_name else str(i) - ids.append(guid) - - return self.add_examples( - texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples - ) - - def add_examples( - self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False - ): - if labels is not None and len(texts_or_text_and_labels) != len(labels): - raise ValueError( - f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}" - ) - if ids is not None and len(texts_or_text_and_labels) != len(ids): - raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}") - if ids is None: - ids = [None] * len(texts_or_text_and_labels) - if labels is None: - labels = [None] * len(texts_or_text_and_labels) - examples = [] - added_labels = set() - for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids): - if isinstance(text_or_text_and_label, (tuple, list)) and label is None: - text, label = text_or_text_and_label - else: - text = text_or_text_and_label - added_labels.add(label) - examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label)) - - # Update examples - if overwrite_examples: - self.examples = examples - else: - self.examples.extend(examples) - - # Update labels - if overwrite_labels: - self.labels = list(added_labels) - else: - self.labels = 
list(set(self.labels).union(added_labels)) - - return self.examples - - def get_features( - self, - tokenizer, - max_length=None, - pad_on_left=False, - pad_token=0, - mask_padding_with_zero=True, - return_tensors=None, - ): - """ - Convert examples in a list of `InputFeatures` - - Args: - tokenizer: Instance of a tokenizer that will tokenize the examples - max_length: Maximum example length - pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default) - pad_token: Padding token - mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values - and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual - values) - - Returns: - If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the - task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific - `InputFeatures` which can be fed to the model. - - """ - if max_length is None: - max_length = tokenizer.max_len - - label_map = {label: i for i, label in enumerate(self.labels)} - - all_input_ids = [] - for ex_index, example in enumerate(self.examples): - if ex_index % 10000 == 0: - logger.info(f"Tokenizing example {ex_index}") - - input_ids = tokenizer.encode( - example.text_a, - add_special_tokens=True, - max_length=min(max_length, tokenizer.max_len), - ) - all_input_ids.append(input_ids) - - batch_length = max(len(input_ids) for input_ids in all_input_ids) - - features = [] - for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)): - if ex_index % 10000 == 0: - logger.info(f"Writing example {ex_index}/{len(self.examples)}") - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) - - # Zero-pad up to the sequence length. 
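-            # pad shorter examples up to batch_length (the longest sequence in this batch), on the left or the right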
- padding_length = batch_length - len(input_ids) - if pad_on_left: - input_ids = ([pad_token] * padding_length) + input_ids - attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask - else: - input_ids = input_ids + ([pad_token] * padding_length) - attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) - - if len(input_ids) != batch_length: - raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}") - if len(attention_mask) != batch_length: - raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}") - - if self.mode == "classification": - label = label_map[example.label] - elif self.mode == "regression": - label = float(example.label) - else: - raise ValueError(self.mode) - - if ex_index < 5 and self.verbose: - logger.info("*** Example ***") - logger.info(f"guid: {example.guid}") - logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}") - logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}") - logger.info(f"label: {example.label} (id = {label})") - - features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label)) - - if return_tensors is None: - return features - elif return_tensors == "tf": - if not is_tf_available(): - raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported") - import tensorflow as tf - - def gen(): - for ex in features: - yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label) - - dataset = tf.data.Dataset.from_generator( - gen, - ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), - ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])), - ) - return dataset - elif return_tensors == "pt": - if not is_torch_available(): - raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported") - import torch - from torch.utils.data import TensorDataset - - all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) - all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) - if self.mode == "classification": - all_labels = torch.tensor([f.label for f in features], dtype=torch.long) - elif self.mode == "regression": - all_labels = torch.tensor([f.label for f in features], dtype=torch.float) - - dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels) - return dataset - else: - raise ValueError("return_tensors should be one of 'tf' or 'pt'") diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deprecated/trajectory_transformer/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deprecated/trajectory_transformer/__init__.py deleted file mode 100644 index b7af1bb48cb7d6a495611b0dadfc910779262813..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deprecated/trajectory_transformer/__init__.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import TYPE_CHECKING - -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_trajectory_transformer": [ - "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "TrajectoryTransformerConfig", - ], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_trajectory_transformer"] = [ - "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", - "TrajectoryTransformerModel", - "TrajectoryTransformerPreTrainedModel", - "load_tf_weights_in_trajectory_transformer", - ] - - -if TYPE_CHECKING: - from .configuration_trajectory_transformer import ( - TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, - TrajectoryTransformerConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_trajectory_transformer import ( - TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, - TrajectoryTransformerModel, - TrajectoryTransformerPreTrainedModel, - load_tf_weights_in_trajectory_transformer, - ) - - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/cluster/__init__.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/cluster/__init__.py deleted file mode 100644 index f1b9bde04e73e9218a5d534227caa4c25332f424..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/cluster/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -import numpy as np -import torch -from sklearn.cluster import KMeans - -def get_cluster_model(ckpt_path): - checkpoint = torch.load(ckpt_path) - kmeans_dict = {} - for spk, ckpt in checkpoint.items(): - km = KMeans(ckpt["n_features_in_"]) - km.__dict__["n_features_in_"] = ckpt["n_features_in_"] - km.__dict__["_n_threads"] = ckpt["_n_threads"] - km.__dict__["cluster_centers_"] = ckpt["cluster_centers_"] - kmeans_dict[spk] = km - return kmeans_dict - -def get_cluster_result(model, x, speaker): - """ - x: np.array [t, 256] - return cluster class result - """ - return model[speaker].predict(x) - -def get_cluster_center_result(model, x,speaker): - """x: np.array [t, 256]""" - predict = model[speaker].predict(x) - return model[speaker].cluster_centers_[predict] - -def get_center(model, x,speaker): - return model[speaker].cluster_centers_[x] diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/ContentVec256L9_Onnx.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/ContentVec256L9_Onnx.py deleted file mode 100644 index fae2b928252801795b038f51451b234e007f6f03..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/ContentVec256L9_Onnx.py +++ /dev/null @@ -1,28 +0,0 @@ -from vencoder.encoder import SpeechEncoder -import onnxruntime -import torch - -class ContentVec256L9_Onnx(SpeechEncoder): - def __init__(self,vec_path = "pretrain/vec-256-layer-9.onnx",device=None): - print("load 
model(s) from {}".format(vec_path)) - self.hidden_dim = 256 - if device is None: - self.dev = torch.device("cpu") - else: - self.dev = torch.device(device) - if device == 'cpu' or device == torch.device("cpu") or device is None: - providers = ['CPUExecutionProvider'] - elif device == 'cuda' or device == torch.device("cuda"): - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - self.model = onnxruntime.InferenceSession(vec_path, providers=providers) - - def encoder(self, wav): - feats = wav - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - feats = feats.unsqueeze(0).cpu().detach().numpy() - onnx_input = {self.model.get_inputs()[0].name: feats} - logits = self.model.run(None, onnx_input) - return torch.tensor(logits[0]).transpose(1, 2).to(self.dev) \ No newline at end of file diff --git a/spaces/ynhe/AskAnything/models/grit_src/grit/data/datasets/object365.py b/spaces/ynhe/AskAnything/models/grit_src/grit/data/datasets/object365.py deleted file mode 100644 index 8b8cc19da23d8397284b50588ee46e750b5b7552..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/grit/data/datasets/object365.py +++ /dev/null @@ -1,111 +0,0 @@ -import logging -import os -from fvcore.common.timer import Timer -from detectron2.structures import BoxMode -from fvcore.common.file_io import PathManager -from detectron2.data import DatasetCatalog, MetadataCatalog -from lvis import LVIS - -logger = logging.getLogger(__name__) - -__all__ = ["load_o365_json", "register_o365_instances"] - - -def register_o365_instances(name, metadata, json_file, image_root): - DatasetCatalog.register(name, lambda: load_o365_json( - json_file, image_root, name)) - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, - evaluator_type="lvis", **metadata - ) - - -def get_o365_meta(): - categories = [{'supercategory': 'object', 'id': 1, 'name': 'object'}] - o365_categories = sorted(categories, key=lambda x: x["id"]) - thing_classes = [k["name"] for k in o365_categories] - meta = {"thing_classes": thing_classes} - return meta - - -def load_o365_json(json_file, image_root, dataset_name=None): - ''' - Load Object365 class name text for object description for GRiT - ''' - - json_file = PathManager.get_local_path(json_file) - - timer = Timer() - lvis_api = LVIS(json_file) - if timer.seconds() > 1: - logger.info("Loading {} takes {:.2f} seconds.".format( - json_file, timer.seconds())) - - class_names = {} - sort_cat = sorted(lvis_api.dataset['categories'], key=lambda x: x['id']) - for x in sort_cat: - if '/' in x['name']: - text = '' - for xx in x['name'].split('/'): - text += xx - text += ' ' - text = text[:-1] - else: - text = x['name'] - class_names[x['id']] = text - - img_ids = sorted(lvis_api.imgs.keys()) - imgs = lvis_api.load_imgs(img_ids) - anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] - - ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len(ann_ids), \ - "Annotation ids in '{}' are not unique".format(json_file) - - imgs_anns = list(zip(imgs, anns)) - logger.info("Loaded {} images in the LVIS v1 format from {}".format( - len(imgs_anns), json_file)) - - dataset_dicts = [] - - for (img_dict, anno_dict_list) in imgs_anns: - record = {} - if "file_name" in img_dict: - file_name = img_dict["file_name"] - record["file_name"] = os.path.join(image_root, file_name) - - record["height"] = int(img_dict["height"]) - record["width"] = 
int(img_dict["width"]) - image_id = record["image_id"] = img_dict["id"] - - objs = [] - for anno in anno_dict_list: - assert anno["image_id"] == image_id - if anno.get('iscrowd', 0) > 0: - continue - obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS} - obj["category_id"] = 0 - obj["object_description"] = class_names[anno['category_id']] - - objs.append(obj) - record["annotations"] = objs - if len(record["annotations"]) == 0: - continue - record["task"] = "ObjectDet" - dataset_dicts.append(record) - - return dataset_dicts - - -_CUSTOM_SPLITS_LVIS = { - "object365_train": ("object365/images/train/", "object365/annotations/train_v1.json"), -} - - -for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS.items(): - register_o365_instances( - key, - get_o365_meta(), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) \ No newline at end of file diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/README.md b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/README.md deleted file mode 100644 index d3e1d5cf533555e19c6326777f792ac82a560a84..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# Probabilistic two-stage detection -Two-stage object detectors that use class-agnostic one-stage detectors as the proposal network. - - -
- -> [**Probabilistic two-stage detection**](http://arxiv.org/abs/2103.07461), -> Xingyi Zhou, Vladlen Koltun, Philipp Krähenbühl, -> *arXiv technical report ([arXiv 2103.07461](http://arxiv.org/abs/2103.07461))* - -Contact: [zhouxy@cs.utexas.edu](mailto:zhouxy@cs.utexas.edu). Any questions or discussions are welcome! - -## Abstract - -We develop a probabilistic interpretation of two-stage object detection. We show that this probabilistic interpretation motivates a number of common empirical training practices. It also suggests changes to two-stage detection pipelines. Specifically, the first stage should infer proper object-vs-background likelihoods, which should then inform the overall score of the detector. A standard region proposal network (RPN) cannot infer this likelihood sufficiently well, but many one-stage detectors can. We show how to build a probabilistic two-stage detector from any state-of-the-art one-stage detector. The resulting detectors are faster and more accurate than both their one- and two-stage precursors. Our detector achieves 56.4 mAP on COCO test-dev with single-scale testing, outperforming all published results. Using a lightweight backbone, our detector achieves 49.2 mAP on COCO at 33 fps on a Titan Xp. - -## Summary - -- Two-stage CenterNet: First stage estimates object probabilities, second stage conditionally classifies objects. - -- The resulting detector is faster and more accurate than both traditional two-stage detectors (fewer proposals required) and one-stage detectors (lighter first-stage head). - -- Our best model achieves 56.4 mAP on COCO test-dev. - -- This repo also includes a detectron2-based CenterNet implementation with better accuracy (42.5 mAP at 70 FPS) and a new FPN version of CenterNet (40.2 mAP with Res50_1x). - -## Main results - -All models are trained with multi-scale training and tested with a single scale. The FPS is tested on a Titan RTX GPU. -More models and details can be found in the [MODEL_ZOO](projects/CenterNet2/centernet2_docs/MODEL_ZOO.md). - -#### COCO - -| Model | COCO val mAP | FPS | -|-------------------------------------------|---------------|-------| -| CenterNet-S4_DLA_8x | 42.5 | 71 | -| CenterNet2_R50_1x | 42.9 | 24 | -| CenterNet2_X101-DCN_2x | 49.9 | 8 | -| CenterNet2_R2-101-DCN-BiFPN_4x+4x_1560_ST | 56.1 | 5 | -| CenterNet2_DLA-BiFPN-P5_24x_ST | 49.2 | 38 | - - -#### LVIS - -| Model | val mAP box | -| ------------------------- | ----------- | -| CenterNet2_R50_1x | 26.5 | -| CenterNet2_FedLoss_R50_1x | 28.3 | - - -#### Objects365 - -| Model | val mAP | -|-------------------------------------------|----------| -| CenterNet2_R50_1x | 22.6 | - -## Installation - -Our project is developed on [detectron2](https://github.com/facebookresearch/detectron2). Please follow the official detectron2 [installation](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). All our code is under `projects/CenterNet2/`. In theory, you should be able to copy-paste `projects/CenterNet2/` into the latest detectron2 release or your own detectron2 repo to run our project. There might be API changes in future detectron2 releases that make it incompatible. - -We use the default detectron2 demo script. 
To run inference on an image folder using our pre-trained model, run - -~~~ -python projects/CenterNet2/demo/demo.py --config-file projects/CenterNet2/configs/CenterNet2_R50_1x.yaml --input path/to/image/ --opts MODEL.WEIGHTS models/CenterNet2_R50_1x.pth -~~~ - -## Benchmark evaluation and training - -Please check detectron2 [GETTING_STARTED.md](https://github.com/facebookresearch/detectron2/blob/master/GETTING_STARTED.md) for running evaluation and training. Our config files are under `projects/CenterNet2/configs` and the pre-trained models are in the [MODEL_ZOO](projects/CenterNet2/centernet2_docs/MODEL_ZOO.md). - - -## License - -Our code under `projects/CenterNet2/` is under [Apache 2.0 license](projects/CenterNet2/LICENSE). `projects/CenterNet2/centernet/modeling/backbone/bifpn_fcos.py` are from [AdelaiDet](https://github.com/aim-uofa/AdelaiDet), which follows the original [non-commercial license](https://github.com/aim-uofa/AdelaiDet/blob/master/LICENSE). The code from detectron2 follows the original [Apache 2.0 license](LICENSE). - -## Citation - -If you find this project useful for your research, please use the following BibTeX entry. - - @inproceedings{zhou2021probablistic, - title={Probabilistic two-stage detection}, - author={Zhou, Xingyi and Koltun, Vladlen and Kr{\"a}henb{\"u}hl, Philipp}, - booktitle={arXiv preprint arXiv:2103.07461}, - year={2021} - } diff --git a/spaces/ysharma/Voice-to-jokes/README.md b/spaces/ysharma/Voice-to-jokes/README.md deleted file mode 100644 index 86dec212d5f6b6e6da9218b1c4774b4151deebaa..0000000000000000000000000000000000000000 --- a/spaces/ysharma/Voice-to-jokes/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Voice To Jokes -emoji: 🦀 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ysharma/pix2pix-zero-01/src/inversion.py b/spaces/ysharma/pix2pix-zero-01/src/inversion.py deleted file mode 100644 index 042627823112d97b52d4d99713365a271c3b7b78..0000000000000000000000000000000000000000 --- a/spaces/ysharma/pix2pix-zero-01/src/inversion.py +++ /dev/null @@ -1,68 +0,0 @@ -import os, pdb - -import argparse -import numpy as np -import torch -import requests -from PIL import Image - -from lavis.models import load_model_and_preprocess - -from utils.ddim_inv import DDIMInversion -from utils.scheduler import DDIMInverseScheduler - -if __name__=="__main__": - parser = argparse.ArgumentParser() - parser.add_argument('--input_image', type=str, default='assets/test_images/cat_a.png') - parser.add_argument('--results_folder', type=str, default='output/test_cat') - parser.add_argument('--num_ddim_steps', type=int, default=50) - parser.add_argument('--model_path', type=str, default='CompVis/stable-diffusion-v1-4') - parser.add_argument('--use_float_16', action='store_true') - args = parser.parse_args() - - # make the output folders - os.makedirs(os.path.join(args.results_folder, "inversion"), exist_ok=True) - os.makedirs(os.path.join(args.results_folder, "prompt"), exist_ok=True) - - if args.use_float_16: - torch_dtype = torch.float16 - else: - torch_dtype = torch.float32 - - - # load the BLIP model - model_blip, vis_processors, _ = load_model_and_preprocess(name="blip_caption", model_type="base_coco", is_eval=True, device=torch.device("cuda")) - # make the DDIM inversion pipeline - pipe = DDIMInversion.from_pretrained(args.model_path, torch_dtype=torch_dtype).to("cuda") 
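- # Replace the default scheduler with the inverse DDIM scheduler, so the pipeline - # steps from the input image towards noise (DDIM inversion) rather than denoising - # from noise to an image.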
- pipe.scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) - - - # if the input is a folder, collect all the images as a list - from glob import glob # local import: used only for the folder case below - if os.path.isdir(args.input_image): - l_img_paths = sorted(glob(os.path.join(args.input_image, "*.png"))) - else: - l_img_paths = [args.input_image] - - - for img_path in l_img_paths: - bname = os.path.basename(img_path).split(".")[0] - img = Image.open(img_path).resize((512,512), Image.Resampling.LANCZOS) - # generate the caption - _image = vis_processors["eval"](img).unsqueeze(0).cuda() - prompt_str = model_blip.generate({"image": _image})[0] - x_inv, x_inv_image, x_dec_img = pipe( - prompt_str, - guidance_scale=1, - num_inversion_steps=args.num_ddim_steps, - img=img, - torch_dtype=torch_dtype - ) - # save the inversion - print("Inside inversion >> save the inversion >>>") - print(os.path.join(args.results_folder, f"inversion/{bname}.pt")) - torch.save(x_inv[0], os.path.join(args.results_folder, f"inversion/{bname}.pt")) - # save the prompt string - print("Inside inversion >> save the prompt string >>>") - print(os.path.join(args.results_folder, f"prompt/{bname}.txt")) - with open(os.path.join(args.results_folder, f"prompt/{bname}.txt"), "w") as f: - f.write(prompt_str) diff --git a/spaces/yukie/yukie-sovits3/train.py b/spaces/yukie/yukie-sovits3/train.py deleted file mode 100644 index 97557410edb18717b0330c602fbaa9984f647b13..0000000000000000000000000000000000000000 --- a/spaces/yukie/yukie-sovits3/train.py +++ /dev/null @@ -1,281 +0,0 @@ -import logging -logging.getLogger('matplotlib').setLevel(logging.WARNING) -import os -import json -import argparse -import itertools -import math -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler - -import commons -import utils -from data_utils import TextAudioSpeakerLoader, EvalDataLoader -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, -) -from losses import ( - kl_loss, - generator_loss, discriminator_loss, feature_loss -) - -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch - -torch.backends.cudnn.benchmark = True -global_step = 0 - - -# os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'INFO' - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." 
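- # Single-node multi-GPU setup: one worker process per visible GPU is spawned - # below, and workers rendezvous via init_method='env://' using MASTER_ADDR/PORT.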
- hps = utils.get_hparams() - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = hps.train.port - - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps) - train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True, - batch_size=hps.train.batch_size) - if rank == 0: - eval_dataset = EvalDataLoader(hps.data.validation_files, hps) - eval_loader = DataLoader(eval_dataset, num_workers=1, shuffle=False, - batch_size=1, pin_memory=False, - drop_last=False) - - net_g = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model).cuda(rank) - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - net_g.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - net_g = DDP(net_g, device_ids=[rank]) # , find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank]) - - try: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d) - global_step = (epoch_str - 1) * len(train_loader) - except: - epoch_str = 1 - global_step = 0 - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, - [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, - [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d = nets - optim_g, optim_d = optims - scheduler_g, scheduler_d = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - # train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - for batch_idx, items in enumerate(train_loader): - c, f0, spec, y, spk = items - g = spk.cuda(rank, non_blocking=True) - spec, y = spec.cuda(rank, non_blocking=True), y.cuda(rank, non_blocking=True) - c = c.cuda(rank, non_blocking=True) - f0 = f0.cuda(rank, non_blocking=True) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - - 
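- # Mixed-precision step: the forward passes below run under autocast when - # fp16_run is enabled, and the GradScaler created in run() scales the losses - # and unscales gradients before clipping and each optimizer step.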
with autocast(enabled=hps.train.fp16_run): - y_hat, ids_slice, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(c, f0, spec, g=g, mel=mel) - - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - with autocast(enabled=False): - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl}) - - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - } - - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict - ) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - with torch.no_grad(): - for batch_idx, items in enumerate(eval_loader): - c, f0, spec, y, spk = items - g = spk[:1].cuda(0) - spec, y = spec[:1].cuda(0), y[:1].cuda(0) - c = c[:1].cuda(0) - f0 = f0[:1].cuda(0) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat = generator.module.infer(c, f0, g=g, mel=mel) - - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - audio_dict.update({ - f"gen/audio_{batch_idx}": y_hat[0], - f"gt/audio_{batch_idx}": y[0] - }) - image_dict.update({ - f"gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()), - "gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy()) - }) - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - - -if __name__ == "__main__": - main() diff --git a/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/ui/finetune/data_processing.py b/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/ui/finetune/data_processing.py deleted file mode 100644 index 7dcffb8c02b226e98bb3cedd095b24d9b009d849..0000000000000000000000000000000000000000 --- a/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/ui/finetune/data_processing.py +++ /dev/null @@ -1,74 +0,0 @@ -import json -from ...utils.data import get_dataset_content - -from .values import ( - default_dataset_plain_text_input_variables_separator, - default_dataset_plain_text_input_and_output_separator, - default_dataset_plain_text_data_separator, -) - - -def get_data_from_input(load_dataset_from, dataset_text, dataset_text_format, - dataset_plain_text_input_variables_separator, - dataset_plain_text_input_and_output_separator, - 
dataset_plain_text_data_separator, - dataset_from_data_dir, prompter): - if load_dataset_from == "Text Input": - if dataset_text_format == "JSON": - data = json.loads(dataset_text) - - elif dataset_text_format == "JSON Lines": - lines = dataset_text.split('\n') - data = [] - for i, line in enumerate(lines): - line_number = i + 1 - try: - data.append(json.loads(line)) - except Exception as e: - raise ValueError( - f"Error parsing JSON on line {line_number}: {e}") - - else: # Plain Text - data = parse_plain_text_input( - dataset_text, - ( - dataset_plain_text_input_variables_separator or - default_dataset_plain_text_input_variables_separator - ).replace("\\n", "\n"), - ( - dataset_plain_text_input_and_output_separator or - default_dataset_plain_text_input_and_output_separator - ).replace("\\n", "\n"), - ( - dataset_plain_text_data_separator or - default_dataset_plain_text_data_separator - ).replace("\\n", "\n"), - prompter.get_variable_names() - ) - - else: # Load dataset from data directory - data = get_dataset_content(dataset_from_data_dir) - - return data - - -def parse_plain_text_input( - value, - variables_separator, input_output_separator, data_separator, - variable_names -): - items = value.split(data_separator) - result = [] - for item in items: - parts = item.split(input_output_separator) - variables = get_val_from_arr(parts, 0, "").split(variables_separator) - variables = [it.strip() for it in variables] - variables_dict = {name: var for name, - var in zip(variable_names, variables)} - output = get_val_from_arr(parts, 1, "").strip() - result.append({'variables': variables_dict, 'output': output}) - return result - - -def get_val_from_arr(arr, index, default=None): - return arr[index] if -len(arr) <= index < len(arr) else default diff --git a/spaces/zideliu/styledrop/timm/loss/jsd.py b/spaces/zideliu/styledrop/timm/loss/jsd.py deleted file mode 100644 index dd64e156c23d27aa03817a587ae367e8175fc126..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/timm/loss/jsd.py +++ /dev/null @@ -1,39 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .cross_entropy import LabelSmoothingCrossEntropy - - -class JsdCrossEntropy(nn.Module): - """ Jensen-Shannon Divergence + Cross-Entropy Loss - - Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py - From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - - https://arxiv.org/abs/1912.02781 - - Hacked together by / Copyright 2020 Ross Wightman - """ - def __init__(self, num_splits=3, alpha=12, smoothing=0.1): - super().__init__() - self.num_splits = num_splits - self.alpha = alpha - if smoothing is not None and smoothing > 0: - self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing) - else: - self.cross_entropy_loss = torch.nn.CrossEntropyLoss() - - def __call__(self, output, target): - split_size = output.shape[0] // self.num_splits - assert split_size * self.num_splits == output.shape[0] - logits_split = torch.split(output, split_size) - - # Cross-entropy is only computed on clean images - loss = self.cross_entropy_loss(logits_split[0], target[:split_size]) - probs = [F.softmax(logits, dim=1) for logits in logits_split] - - # Clamp mixture distribution to avoid exploding KL divergence - logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log() - loss += self.alpha * sum([F.kl_div( - logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs) - return loss diff --git 
a/spaces/zideliu/styledrop/timm/models/resnest.py b/spaces/zideliu/styledrop/timm/models/resnest.py deleted file mode 100644 index 5a8bb348302956a1578facdef39368c04a376641..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/timm/models/resnest.py +++ /dev/null @@ -1,236 +0,0 @@ -""" ResNeSt Models - -Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 - -Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang - -Modified for torchscript compat, and consistency with timm by Ross Wightman -""" -import torch -from torch import nn - -from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD -from .helpers import build_model_with_cfg -from .layers import SplitAttnConv2d -from .registry import register_model -from .resnet import ResNet - - -def _cfg(url='', **kwargs): - return { - 'url': url, - 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 0.875, 'interpolation': 'bilinear', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, - 'first_conv': 'conv1.0', 'classifier': 'fc', - **kwargs - } - -default_cfgs = { - 'resnest14d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest14-9c8fe254.pth'), - 'resnest26d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest26-50eb607c.pth'), - 'resnest50d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50-528c19ca.pth'), - 'resnest101e': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth', - input_size=(3, 256, 256), pool_size=(8, 8)), - 'resnest200e': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest200-75117900.pth', - input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), - 'resnest269e': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest269-0cc87c48.pth', - input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), - 'resnest50d_4s2x40d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_4s2x40d-41d14ed0.pth', - interpolation='bicubic'), - 'resnest50d_1s4x24d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_1s4x24d-d4a4f76f.pth', - interpolation='bicubic') -} - - -class ResNestBottleneck(nn.Module): - """ResNet Bottleneck - """ - # pylint: disable=unused-argument - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, - radix=1, cardinality=1, base_width=64, avd=False, avd_first=False, is_first=False, - reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, - attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): - super(ResNestBottleneck, self).__init__() - assert reduce_first == 1 # not supported - assert attn_layer is None # not supported - assert aa_layer is None # TODO not yet supported - assert drop_path is None # TODO not yet supported - - group_width = int(planes * (base_width / 64.)) * cardinality - first_dilation = first_dilation or dilation - if avd and (stride > 1 or is_first): - avd_stride = stride - stride = 1 - else: - avd_stride = 0 - self.radix = radix - self.drop_block = drop_block - - self.conv1 = 
nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) - self.bn1 = norm_layer(group_width) - self.act1 = act_layer(inplace=True) - self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None - - if self.radix >= 1: - self.conv2 = SplitAttnConv2d( - group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, - dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_block=drop_block) - self.bn2 = None # FIXME revisit, here to satisfy current torchscript fussyness - self.act2 = None - else: - self.conv2 = nn.Conv2d( - group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, - dilation=first_dilation, groups=cardinality, bias=False) - self.bn2 = norm_layer(group_width) - self.act2 = act_layer(inplace=True) - self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None - - self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) - self.bn3 = norm_layer(planes*4) - self.act3 = act_layer(inplace=True) - self.downsample = downsample - - def zero_init_last_bn(self): - nn.init.zeros_(self.bn3.weight) - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - if self.drop_block is not None: - out = self.drop_block(out) - out = self.act1(out) - - if self.avd_first is not None: - out = self.avd_first(out) - - out = self.conv2(out) - if self.bn2 is not None: - out = self.bn2(out) - if self.drop_block is not None: - out = self.drop_block(out) - out = self.act2(out) - - if self.avd_last is not None: - out = self.avd_last(out) - - out = self.conv3(out) - out = self.bn3(out) - if self.drop_block is not None: - out = self.drop_block(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.act3(out) - return out - - -def _create_resnest(variant, pretrained=False, **kwargs): - return build_model_with_cfg( - ResNet, variant, default_cfg=default_cfgs[variant], pretrained=pretrained, **kwargs) - - -@register_model -def resnest14d(pretrained=False, **kwargs): - """ ResNeSt-14d model. Weights ported from GluonCV. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[1, 1, 1, 1], - stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest14d', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest26d(pretrained=False, **kwargs): - """ ResNeSt-26d model. Weights ported from GluonCV. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[2, 2, 2, 2], - stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest26d', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest50d(pretrained=False, **kwargs): - """ ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955 - Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 4, 6, 3], - stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest50d', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest101e(pretrained=False, **kwargs): - """ ResNeSt-101e model. 
Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955 - Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 4, 23, 3], - stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest101e', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest200e(pretrained=False, **kwargs): - """ ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955 - Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 24, 36, 3], - stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest200e', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest269e(pretrained=False, **kwargs): - """ ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955 - Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 30, 48, 8], - stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest269e', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest50d_4s2x40d(pretrained=False, **kwargs): - """ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 4, 6, 3], - stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, - block_args=dict(radix=4, avd=True, avd_first=True), **kwargs) - return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest50d_1s4x24d(pretrained=False, **kwargs): - """ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 4, 6, 3], - stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, - block_args=dict(radix=1, avd=True, avd_first=True), **kwargs) - return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **model_kwargs) diff --git a/spaces/zomehwh/vits-models-pcr/modules.py b/spaces/zomehwh/vits-models-pcr/modules.py deleted file mode 100644 index 56ea4145eddf19dd330a3a41ab0183efc1686d83..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/vits-models-pcr/modules.py +++ /dev/null @@ -1,388 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = 
F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 1." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dilated and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is 
not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * 
x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
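- # The projection above produces, per half-channel and time step, the - # 3 * num_bins - 1 parameters of a monotonic rational-quadratic spline: - # num_bins bin widths, num_bins bin heights and num_bins - 1 interior knot - # derivatives, sliced out below.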
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/zonglin03/White-box-Cartoonization/wbc/cartoonize.py b/spaces/zonglin03/White-box-Cartoonization/wbc/cartoonize.py deleted file mode 100644 index 25faf1ceb95aaed9a3f7a7982d17a03dc6bc32b1..0000000000000000000000000000000000000000 --- a/spaces/zonglin03/White-box-Cartoonization/wbc/cartoonize.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import cv2 -import numpy as np -import tensorflow as tf -import wbc.network as network -import wbc.guided_filter as guided_filter -from tqdm import tqdm - - -def resize_crop(image): - h, w, c = np.shape(image) - if min(h, w) > 720: - if h > w: - h, w = int(720 * h / w), 720 - else: - h, w = 720, int(720 * w / h) - image = cv2.resize(image, (w, h), - interpolation=cv2.INTER_AREA) - h, w = (h // 8) * 8, (w // 8) * 8 - image = image[:h, :w, :] - return image - - -def cartoonize(load_folder, save_folder, model_path): - print(model_path) - input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(input_photo) - final_out = guided_filter.guided_filter(input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - sess = tf.Session(config=config) - - sess.run(tf.global_variables_initializer()) - saver.restore(sess, tf.train.latest_checkpoint(model_path)) - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = sess.run(final_out, feed_dict={input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except Exception as e: - print('cartoonize {} failed: {}'.format(load_path, e)) - - -class Cartoonize: - def __init__(self, model_path): - print(model_path) - self.input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(self.input_photo) - self.final_out = guided_filter.guided_filter(self.input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - self.sess = tf.Session(config=config) - - self.sess.run(tf.global_variables_initializer()) - saver.restore(self.sess, tf.train.latest_checkpoint(model_path)) - - def run(self, load_folder, save_folder): - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except Exception as e: - print('cartoonize {} failed: {}'.format(load_path, e)) - - def run_sigle(self, load_path, save_path): - try: - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except Exception as e: - print('cartoonize {} failed: {}'.format(load_path, e)) - - -if __name__ == '__main__': - model_path = 'saved_models' - load_folder = 'test_images' - save_folder = 'cartoonized_images' - if not os.path.exists(save_folder): - os.mkdir(save_folder) - cartoonize(load_folder, save_folder, model_path)
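A minimal usage sketch for the `Cartoonize` class above, mirroring its `__main__` block; the `wbc` import path and the `photo.jpg` filename are illustrative assumptions, and a TF1-style environment is required, as in the module itself:

~~~
import os
from wbc.cartoonize import Cartoonize  # import path assumed from the repo layout above

# Build the TF1 graph once and restore the latest checkpoint from saved_models/.
cartoonizer = Cartoonize('saved_models')

# Batch mode: cartoonize every image in a folder (the output folder must exist,
# since cv2.imwrite does not create directories).
os.makedirs('cartoonized_images', exist_ok=True)
cartoonizer.run('test_images', 'cartoonized_images')

# Single-image mode (the method is spelled 'run_sigle' upstream).
cartoonizer.run_sigle('test_images/photo.jpg', 'cartoonized_images/photo.jpg')
~~~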