diff --git a/spaces/0x7194633/mbrat-ru-sum/app.py b/spaces/0x7194633/mbrat-ru-sum/app.py
deleted file mode 100644
index d54ff626444ea3a7bd7afddb3ddc2f37f5522bb7..0000000000000000000000000000000000000000
--- a/spaces/0x7194633/mbrat-ru-sum/app.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import gradio as gr
-from transformers import MBartTokenizer, MBartForConditionalGeneration
-
-model_name = "IlyaGusev/mbart_ru_sum_gazeta"
-tokenizer = MBartTokenizer.from_pretrained(model_name)
-model = MBartForConditionalGeneration.from_pretrained(model_name)
-
-def summarize(text):
-    input_ids = tokenizer.batch_encode_plus([text], return_tensors="pt", max_length=1024)["input_ids"].to(model.device)
-    summary_ids = model.generate(input_ids=input_ids, no_repeat_ngram_size=4)
-    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
-
-gr.Interface(fn=summarize, inputs="text", outputs="text", description="Russian Summarizer").launch()
\ No newline at end of file
diff --git a/spaces/17TheWord/vits-models/models.py b/spaces/17TheWord/vits-models/models.py
deleted file mode 100644
index 8353b867f441de7e4d05aef980e672899c3a8889..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/vits-models/models.py
+++ /dev/null
@@ -1,533 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
-  def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
-    super().__init__()
-    filter_channels = in_channels # it needs to be removed from future version.
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, 
in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, 
resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - 
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = 
torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gili-sms Full !LINK! Version.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gili-sms Full !LINK! Version.md deleted file mode 100644 index 59c30b4246ca77e7037cb9a39300e2bbfaa82b8d..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gili-sms Full !LINK! Version.md +++ /dev/null @@ -1,106 +0,0 @@ -
-

Gili-sms Full Version: A Powerful Software for Sending and Receiving SMS from Your Computer

-

Do you want to send and receive SMS from your computer without any hassle? Do you want to use SMS for various purposes such as promotion, notification, service, etc.? Do you want to save time and money on your SMS communication? If you answered yes to any of these questions, then you need Gili-sms Full Version.

-

What is Gili-sms and what can it do?

-

Gili-sms is software for sending and receiving SMS from your computer. You just need to connect Gili-sms to a modem or a phone, and then you can send and receive SMS from the Gili-sms interface. You can send up to 1,600 characters in one long SMS. You can also use Gili-sms for various purposes such as promotion, notification, service, etc.

-

Gili-sms Full Version


Download ○○○ https://byltly.com/2uKvwN



-

Why use Gili-sms for your SMS needs?

-

SMS is one of the most effective ways of communication in today's world. It is fast, reliable, and personal. However, sending and receiving SMS from your phone can be inconvenient, expensive, and limited. That's why you need Gili-sms Full Version. With Gili-sms Full Version, you can:

- -

Features of Gili-sms Full Version

-

How to install and configure Gili-sms on your computer

-

Gili-sms is an application that can be installed and used on Windows 32-bit and 64-bit operating systems. The installation includes the application and the database. You can download Gili-sms Full Version from here. After downloading the file, you need to enter the following key: !-GCCnSwNC60lp4BTB2YU_ZzMHlvmNC9lN944B7Igfxg. Then, you need to follow the instructions on how to install and configure Gili-sms on your computer. You can find the instructions here. The key for the instructions is: !PpOjto4o1IxAXCKKWMjkEJrWrg9bY0nlM0Oi_xWzep4.

-

How to connect Gili-sms to a modem or a phone

-

Gili-sms can be connected to a modem or a phone using a cable or a Bluetooth connection. You can use a GSM internet modem, a GSM SMS modem, or a phone as a modem. You need to insert a SIM card into the modem or the phone. Then, you need to set up the connection settings in Gili-sms according to your device. You can find the guide on how to connect Gili-sms to a modem or a phone here. The key for the guide is: !WV_qvNmcWiHDuo8EfxzM6AlHFUbN-ku0aD8xA3vrsuk.

-

How to send and receive SMS with Gili-sms

-

Sending and receiving SMS with Gili-sms is very easy. You just need to type and send. You can also import contacts from your phone or other sources into Gili-sms. You can create groups of contacts for easier management. You can also schedule messages for later delivery. You can send up to 1,600 characters in one long SMS. You can also use templates and variables for faster and personalized messages. You can also track the status of your messages in real time.
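To illustrate the template-and-variable idea in general terms, here is a rough Python sketch of the concept. It is not Gili-sms's actual template syntax, and the field names and phone numbers are made up for the example:

```python
# Generic example: personalize one message template per contact.
template = "Dear {name}, your order {order_id} is ready for pickup."

contacts = [
    {"name": "Andi", "order_id": "A-102", "phone": "+628120000001"},
    {"name": "Budi", "order_id": "A-103", "phone": "+628120000002"},
]

for contact in contacts:
    # Fill the variables for this contact to get a personalized message.
    message = template.format(name=contact["name"], order_id=contact["order_id"])
    print(contact["phone"], "->", message)  # an SMS tool would send this instead of printing
```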

-

How to use Gili-sms for various purposes such as promotion, notification, service, etc.

-

Gili-sms can be used for various purposes such as promotion, notification, service, etc. For example:

- -

You can also customize your messages according to your needs and preferences. You can use different sender names or numbers for different purposes. You can also use different languages or alphabets for different audiences.

-

Benefits of Gili-sms Full Version

-

How Gili-sms can save you time and money

-

Gili-SMS Full Version is very affordable compared to other software or services that offer similar features. You only need to pay once for the software license and then you can use it forever without any additional fees or subscriptions. You also only need to pay for the SIM card that you use for sending and receiving SMS. You don't need to pay for any other hardware or software costs.

-

Gili-sms Full Version free download
-Gili-sms Full Version crack
-Gili-sms Full Version serial key
-Gili-sms Full Version license key
-Gili-sms Full Version activation code
-Gili-sms Full Version registration code
-Gili-sms Full Version keygen
-Gili-sms Full Version patch
-Gili-sms Full Version torrent
-Gili-sms Full Version review
-Gili-sms Full Version features
-Gili-sms Full Version benefits
-Gili-sms Full Version pros and cons
-Gili-sms Full Version comparison
-Gili-sms Full Version alternatives
-Gili-sms Full Version price
-Gili-sms Full Version discount
-Gili-sms Full Version coupon code
-Gili-sms Full Version promo code
-Gili-sms Full Version offer code
-Gili-sms Full Version deal
-Gili-sms Full Version sale
-Gili-sms Full Version refund policy
-Gili-sms Full Version customer service
-Gili-sms Full Version support
-Gili-sms Full Version tutorial
-Gili-sms Full Version guide
-Gili-sms Full Version manual
-Gili-sms Full Version instructions
-Gili-sms Full Version tips and tricks
-Gili-sms Full Version best practices
-Gili-sms Full Version use cases
-Gili-sms Full Version examples
-Gili-sms Full Version testimonials
-Gili-sms Full Version feedbacks
-Gili-sms Full Version ratings
-Gili-sms Full Version comments
-Gili-sms Full Version questions and answers
-Gili-sms Full Version FAQs
-Gili-sms Full Version forum
-Gili-sms Full Version blog posts
-Gili-sms Full Version articles
-Gili-sms Full Version videos
-Gili-sms Full Version podcasts
-Gili-sms Full Version webinars
-Gili-sms Full Version courses
-Gili-sms Full Version ebooks
-Gili-sms Full Version reports
-Gili-sms Full Version whitepapers
-Gili-sms Full Version infographics

-

Gili-sms Full Version is also very efficient compared to using your phone for sending and receiving SMS. You don't need to type on a small keyboard or screen anymore. You don't need to switch between different apps or devices anymore. You don't need to worry about battery life or signal strength anymore. You can send and receive SMS from your computer with ease.

-

How Gili-sms can improve your communication and customer satisfaction

-

Gili-sms Full Version can help you improve your communication and customer satisfaction by allowing you to send personalized messages that are relevant and timely. You can also respond quickly and effectively to any messages that you receive from your customers or the public. You can also track the status of your messages in real time so that you know whether they are delivered or not.

-

Gili-sms Full Version can also help you build trust and loyalty with your customers or the public by providing them with valuable information that they need or want. You can also show them that you care about them by sending them greetings or wishes on special occasions such as holidays or birthdays.

-

How Gili-sms can support multiple languages and long messages

-

Gili-sms Full Version supports multiple languages including English, Indonesian, Arabic, Chinese, Hindi, etc. You can choose the language that suits your audience best. You can also use different alphabets such as Latin, Arabic, Chinese characters, etc.

-

Gili-sms Full Version also supports long messages of up to 1,600 characters in one SMS. This means that you don't have to worry about splitting your message into multiple parts anymore. You can write as much as you want without losing any meaning or context.

-

Conclusion

-

Gili-sms Full Version is a powerful software for sending and receiving SMS from your computer that can be used for various purposes such as promotion, notification, service, etc. It has many features and benefits that can save you time and money, improve your communication and customer satisfaction, and support multiple languages and long messages. You can download Gili-sms Full Version from here and start using it today.

-

FAQs

-

What are the system requirements for Gili-sms?

-

Gili-sms can be installed and used on Windows 32-bit and 64-bit operating systems from Windows 2000 to Windows 10. You also need a modem or a phone that can be connected to your computer via cable or Bluetooth. You also need a SIM card that has enough credit for sending and receiving SMS.

-

What are the differences between Gili-sms Full Version and other versions?

-

Gili-sms Full Version is the most complete and advanced version of Gili-sms. It has all the features and benefits that are mentioned in this article. Other versions of Gili-sms may have some limitations or restrictions, such as a demo version, a customized version, etc.

-

How to update Gili-sms to the latest version?

-

You can update Gili-sms to the latest version by downloading the update file from here. The update file is only applicable to Gili-sms Full Version releases from 2012 or later. You need to back up your database before updating.

-

How to contact the support team of Gili-sms?

-

You can contact the support team of Gili-sms by sending an email to support@yusiwa.com or by calling +62 21 7888 9999. You can also visit their website at www.yusiwa.com for more information.

-

Where to find more information about Gili-sms?

-

You can find more information about Gili-sms by visiting their website at www.yusiwa.com. You can also read some reviews or testimonials from other users of Gili-sms on SoundCloud.

-

0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Baixe Township com dinheiro infinito e construa sua cidade dos sonhos em 2022.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Baixe Township com dinheiro infinito e construa sua cidade dos sonhos em 2022.md
deleted file mode 100644
index ec1101095edb1afe73fea2618b9931fac37f05fa..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Baixe Township com dinheiro infinito e construa sua cidade dos sonhos em 2022.md
+++ /dev/null
@@ -1,199 +0,0 @@
-

Township Dinheiro Infinito 2022 Download: How to Get Unlimited Money and Coins in Township

-

Township is one of the most popular casual games on Android and iOS devices. It combines city-building and farming elements, allowing you to create your dream town, harvest crops, process goods, trade with other countries, and more. However, like most free-to-play games, Township also has in-game currency that you need to buy or earn in order to progress faster and unlock more features. In this article, we will show you how to download Township Dinheiro Infinito 2022 Mod APK, a modified version of the game that gives you unlimited money and coins. We will also share some tips and tricks on how to grow and expand your town faster in Township.

-

What is Township and Why You Should Play It

-

Township is a game developed by Playrix, a leading mobile game developer that also created other popular titles such as Gardenscapes, Homescapes, Fishdom, and more. Township was first released in 2012 as a Facebook game, but later expanded to other platforms such as iOS, Android, Windows, Mac, and Amazon. As of 2021, Township has over 100 million downloads on Google Play Store alone, making it one of the most successful games in its genre.

-

township dinheiro infinito 2022 download


Download Filehttps://urlin.us/2uSUUj



-

Township is a unique blend of city-building and farming

-

Unlike other city-building games that focus only on urban development, Township also incorporates farming elements into its gameplay. You start with a small plot of land where you can plant crops such as wheat, corn, carrots, potatoes, etc. You can then use these crops to produce goods in factories such as bread, cheese, sugar, rubber, etc. You can also raise animals such as cows, chickens, pigs, sheep, etc. and collect their products such as milk, eggs, bacon, wool, etc.

-

These goods can then be sold to your townspeople or delivered by helicopter or train to other places in exchange for coins and experience points. Coins are used to buy new buildings, decorations, expansions, etc., while experience points are used to level up and unlock new items. You can also trade with other countries using the port or the airport. By trading with exotic countries, you can get rare goods that you can't produce in your town.

-

Township offers many features and activities to keep you entertained

-

Township is not just about building and farming. There are also many other features and activities that you can enjoy in the game. For example:

- -

There is always something new and exciting to do in Township. You will never get bored of this game.

-

Township has amazing graphics and animations

-

Another reason why you should play Township is that it has amazing graphics and animations. The game has a colorful and cartoonish style that appeals to both kids and adults. The game also has realistic and smooth animations that make the game more lively and fun. You can see your townspeople walking, working, shopping, dancing, etc. You can also see your animals moving, eating, sleeping, etc. You can also interact with your town by tapping on buildings, vehicles, decorations, etc. and see them respond to your actions.

-

Township is a game that will make you feel like you are living in a virtual world. You will be amazed by the details and the quality of the game.

-

township apk mod dinheiro infinito 2022
-township hack dinheiro infinito 2022 download
-township dinheiro infinito 2022 atualizado
-township dinheiro infinito 2022 mediafire
-township dinheiro infinito 2022 mega
-township dinheiro infinito 2022 sem root
-township dinheiro infinito 2022 como instalar
-township dinheiro infinito 2022 para android
-township dinheiro infinito 2022 sinho gamer
-township dinheiro infinito 2022 baixar apk mod
-township dinheiro infinito 2022 link direto
-township dinheiro infinito 2022 funcionando
-township dinheiro infinito 2022 gratis
-township dinheiro infinito 2022 online
-township dinheiro infinito 2022 tutorial
-township dinheiro infinito 2022 site oficial
-township dinheiro infinito 2022 versão mais recente
-township dinheiro infinito 2022 play store
-township dinheiro infinito 2022 jogar agora
-township dinheiro infinito 2022 dicas e truques
-township dinheiro infinito 2022 mod menu
-township dinheiro infinito 2022 youtube
-township dinheiro infinito 2022 google drive
-township dinheiro infinito 2022 dropbox
-township dinheiro infinito 2022 uptodown
-township dinheiro infinito 2022 apk pure
-township dinheiro infinito 2022 happy mod
-township dinheiro infinito 2022 rexdl
-township dinheiro infinito 2022 revdl
-township dinheiro infinito 2022 androeed ru
-township dinheiro infinito 2022 android1 com
-township dinheiro infinito 2022 apk home com
-township dinheiro infinito 2022 apk award com
-township dinheiro infinito 2022 apk done com
-township dinheiro infinito 2022 apk monk com
-township dinheiro infinito 2022 apk mirror com
-township dinheiro infinito 2022 apk mob org
-township dinheiro infinito 2022 apkpure com
-township dinheiro infinito 2022 apkmody io
-township dinheiro infinito 2022 apknite com
-township dinheiro infinito 2022 apksfree com
-township dinheiro infinito 2022 apktada com
-township dinheiro infinito 2022 apktovi com
-township dinheiro infinito 2022 apkturbo com
-township dinheiro infinito 2022 appvn com
-township dinheiro infinito 2022 blackmod net
-township dinheiro infinito 2022 ihackedit com
-township dinheiro infinito 2022 modapkdown com
-township dinheiro infinito 2022 moddroid com

-

How to Download Township Dinheiro Infinito 2022 Mod APK

-

If you want to get unlimited money and coins in Township, you will need to download Township Dinheiro Infinito 2022 Mod APK. This is a modified version of the game that gives you access to a mod menu where you can enable the money and coins hack. With this hack, you can buy anything you want in the game without worrying about running out of resources. You can also enjoy other features such as unlimited cash, unlimited gems, unlimited keys, etc.

-

What is a mod APK and what are the benefits of using it

-

A mod APK is a modified version of an original APK file. An APK file is the file format used by Android devices to install applications. A mod APK is created by modifying the original APK file to change some aspects of the game such as features, functions, graphics, etc. A mod APK can give you advantages over the original game such as:

- -

A mod APK can make your gaming experience more enjoyable and satisfying. However, you should also be aware of the risks of using a mod APK such as:

- -

Therefore, you should always use a mod APK at your own risk and discretion. You should also download a mod APK from a trusted and reliable source.

-

Where to find and download the latest version of Township Dinheiro Infinito 2022 Mod APK

-

If you are looking for a trusted and reliable source to download Township Dinheiro Infinito 2022 Mod APK, you can visit our website [text]. We provide you with the latest version of Township Dinheiro Infinito 2022 Mod APK that is compatible with all Android devices. Our mod APK is also safe and secure to use as we scan it for malware and viruses before uploading it on our website.

-

To download Township Dinheiro Infinito 2022 Mod APK from our website, you need to follow these simple steps:

-
    -
  1. Go to our website [text] and search for Township Dinheiro Infinito 2022 Mod APK.
  2. Click on the download button and wait for a few seconds until the download link is generated.
  3. Click on the download link and save the mod APK file on your device.
-

How to install and run the mod APK on your device

-

After downloading Township Dinheiro Infinito 2022 Mod APK from our website, you need to install and run it on your device. To do that, you need to follow these simple steps:

-
    -
  1. Go to your device settings and enable unknown sources. This will allow you to install applications from sources other than the Google Play Store.
  2. Locate the mod APK file on your device and tap on it to start the installation process.
  3. Follow the instructions on the screen and wait for the installation to complete.
  4. Launch the game from your app drawer or home screen and enjoy the mod features.
-

Note: You may need to uninstall the original version of Township before installing the mod APK. You may also need to allow some permissions to the mod APK such as storage, location, etc. for it to work properly.

-

How to Use Township Dinheiro Infinito 2022 Mod APK to Get Unlimited Money and Coins

-

Now that you have installed and run Township Dinheiro Infinito 2022 Mod APK on your device, you can use it to get unlimited money and coins in the game. Here is how you can do that:

-

How to access the mod menu and enable the money and coins hack

-

To access the mod menu and enable the money and coins hack, you need to follow these simple steps:

-
    -
  1. Open the game and wait for it to load.
  2. Tap on the icon that looks like a gear or a wrench on the top right corner of the screen. This will open the mod menu.
  3. Tap on the option that says "Money and Coins Hack". This will enable the hack and give you unlimited money and coins in the game.
  4. Tap on the back button or anywhere outside the mod menu to close it.
-

You can now see that your money and coins have increased to a huge amount. You can use them to buy anything you want in the game.

-

How to spend your money and coins wisely in the game

-

Although you have unlimited money and coins in the game, you should still spend them wisely and not waste them on unnecessary things. Here are some tips on how to spend your money and coins wisely in the game:

- -

You should also avoid spending your money and coins on things that are not worth it or that will not help you progress faster in the game. For example, you should avoid buying:

- -

How to avoid getting banned or detected by the game developers

-

Although Township Dinheiro Infinito 2022 Mod APK is safe and secure to use, there is still a slight chance that you may get banned or detected by the game developers if you use it too much or too obviously. To avoid getting banned or detected by the game developers, you should follow these tips:

- -

Tips and Tricks to Grow and Expand Your Town Faster in Township

-

Besides using Township Dinheiro Infinito 2022 Mod APK, there are also some tips and tricks that you can use to grow and expand your town faster in Township. Here are some of them:

-

How to maintain a steady production of goods and fulfill orders

-

One of the main tasks in Township is to produce goods and fulfill orders. This will help you earn coins, experience points, and other rewards. To maintain a steady production of goods and fulfill orders, you should:

- -

How to add new buildings and decorations to your town

-

Another main task in Township is to add new buildings and decorations to your town. This will help you increase your population, happiness, income, and production. To add new buildings and decorations to your town, you should:

- -

How to use the train, airplane, and port to trade with other countries

-

Another main task in Township is to use the train, airplane, and port to trade with other countries. This will help you get rare goods that you can't produce in your town. To use the train, airplane, and port to trade with other countries, you should:

- -

How to explore the mine, zoo, and landmarks in your town

-

Another main task in Township is to explore the mine, zoo, and landmarks in your town. This will help you get resources, animals, and artifacts that you can use in your town or trade with other players. To explore the mine, zoo, and landmarks in your town, you should:

- -

How to play with your friends and join clans in the game community

-

Another main task in Township is to play with your friends and join clans in the game community. This will help you socialize, cooperate, and compete with other players from around the world. To play with your friends and join clans in the game community, you should:

- -

Conclusion

-

Township is a fun and addictive game that will keep you entertained for hours. You can build your dream town, farm your crops, produce your goods, trade with other countries, explore the mine, zoo, and landmarks, play with your friends and join clans, and more. However, if you want to get unlimited money and coins in the game, you will need to download Township Dinheiro Infinito 2022 Mod APK from our website [text]. This mod APK will give you access to a mod menu where you can enable the money and coins hack and enjoy other features such as unlimited cash, gems, keys, etc. You can also use our tips and tricks to grow and expand your town faster in Township. We hope you enjoy playing Township Dinheiro Infinito 2022 Mod APK and have a great time!

-

FAQs

-

Here are some frequently asked questions about Township Dinheiro Infinito 2022 Mod APK:

- - - - - - -
Q: Is Township Dinheiro Infinito 2022 Mod APK safe and secure to use?
A: Yes, Township Dinheiro Infinito 2022 Mod APK is safe and secure to use as we scan it for malware and viruses before uploading it on our website. However, you should always use it at your own risk and discretion.

Q: Is Township Dinheiro Infinito 2022 Mod APK compatible with all Android devices?
A: Yes, Township Dinheiro Infinito 2022 Mod APK is compatible with all Android devices that support the original version of Township. However, you should always check the system requirements of the mod APK before downloading it.

Q: Do I need to root my device to use Township Dinheiro Infinito 2022 Mod APK?
A: No, you do not need to root your device to use Township Dinheiro Infinito 2022 Mod APK. You can install and run it on any non-rooted device.

Q: Will I get banned or detected by the game developers if I use Township Dinheiro Infinito 2022 Mod APK?
A: There is a slight chance that you may get banned or detected by the game developers if you use Township Dinheiro Infinito 2022 Mod APK too much or too obviously. To avoid getting banned or detected by the game developers, you should follow our tips on how to use the mod APK safely and discreetly.

Q: Can I update Township Dinheiro Infinito 2022 Mod APK when a new version of Township is released?
A: Yes, you can update Township Dinheiro Infinito 2022 Mod APK when a new version of Township is released. However, you should always check our website [text] for the latest version of Township Dinheiro Infinito 2022 Mod APK before updating it.

197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Criminal Case The Conspiracy MOD APK - The Best Way to Play the Game.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Criminal Case The Conspiracy MOD APK - The Best Way to Play the Game.md
deleted file mode 100644
index ee866d20a009942088df6c3f53f1a605053119e8..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Criminal Case The Conspiracy MOD APK - The Best Way to Play the Game.md
+++ /dev/null
@@ -1,92 +0,0 @@
-

Download Mod Criminal Case: The Conspiracy - How to Play the Thrilling Adventure Game for Free

-

If you are a fan of crime-solving games, you might have heard of Criminal Case: The Conspiracy, a popular hidden object, adventure game developed by Pretty Simple. In this game, you join the Police of Grimsborough once again to solve a series of murder cases in a dark and twisted city. You investigate crime scenes, interrogate suspects, analyze evidence, and catch the killers. But what if you want to play this game without spending any money or waiting for energy refills? Is there a way to enjoy this game for free and with unlimited resources? The answer is yes, if you download mod criminal case the conspiracy. In this article, we will explain what is a mod apk, how to download it, and what are the benefits and risks of using it.

-

What is Criminal Case: The Conspiracy?

-

A captivating hidden object, adventure game

-

Criminal Case: The Conspiracy is the fifth game of the Criminal Case series, which has been downloaded over 100 million times on Google Play and App Store. It is a hidden object, adventure game that puts you in the role of a detective who has to solve various murder cases. You have to find clues in different crime scenes, examine them in the lab, question witnesses and suspects, and use your logic and intuition to identify the killer. You can also team up with other players online and compete for the best scores.

-

download mod criminal case the conspiracy


Download Zip ---> https://urlin.us/2uT2vC



-

A dark and suspenseful storyline

-

The game is set in Grimsborough, a fictional city that has been corrupted by crime and violence. You will encounter many intriguing characters and scenarios as you uncover the secrets and conspiracies behind each murder. The game has a realistic and immersive graphics style that creates a thrilling atmosphere. The game also has a voice-over narration that adds more depth and emotion to the story.

-

A challenging and rewarding gameplay

-

The game is not just about finding objects in a scene. You also have to use your brain and skills to solve puzzles, mini-games, and quizzes that test your knowledge and memory. You also have to make decisions that affect the outcome of the case and your reputation as a detective. The game rewards you with stars, coins, cash, energy, and other items that you can use to customize your avatar, buy new outfits and accessories, unlock new crime scenes, and access more features.

-

What is a mod apk?

-

A modified version of the original app

-

A mod apk is a file that contains a modified version of an original app or game. It is usually created by third-party developers or hackers who want to alter or enhance some aspects of the app or game. For example, a mod apk can remove ads, unlock premium features, increase resources, change graphics, add cheats, etc.

-

A way to unlock premium features and unlimited resources

-

One of the main reasons why people download mod apks is to get access to premium features and unlimited resources that are otherwise not available in the original app or game. For example, if you download mod criminal case the conspiracy, you can enjoy an ad-free experience, increase your energy bar to 170, get unlimited coins and cash, unlock all crime scenes and outfits, etc.

-

A potential risk for your device and data security

-

However, downloading mod apks also comes with some risks. Since they are not authorized by the original developers

of the app or game, they may contain malware, viruses, spyware, or other harmful software that can damage your device or steal your personal data. They may also violate the terms and conditions of the app or game, which can result in your account being banned or suspended. Therefore, you should always be careful and cautious when downloading mod apks from unknown sources.

-

How to download mod criminal case the conspiracy?

-

Find a reliable source of mod apk files

-

The first step to download mod criminal case the conspiracy is to find a trustworthy and reputable website that offers mod apk files for free. You can search online for reviews, ratings, feedback, and comments from other users who have downloaded the same mod apk file. You can also check the file size, version, compatibility, and update date of the mod apk file before downloading it.

-

Enable unknown sources on your device settings

-

The next step is to enable unknown sources on your device settings. This will allow you to install apps or games from sources other than the official Google Play Store or App Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on. You may also need to disable any antivirus or firewall software that may block the installation of mod apks.

-

download mod apk criminal case the conspiracy
-download modded version of criminal case the conspiracy
-download hack mod for criminal case the conspiracy
-download unlimited money mod for criminal case the conspiracy
-download free purchase mod for criminal case the conspiracy
-download latest mod of criminal case the conspiracy
-download mod criminal case the conspiracy android
-download mod criminal case the conspiracy ios
-download mod criminal case the conspiracy for pc
-download mod criminal case the conspiracy online
-download mod criminal case the conspiracy offline
-download mod criminal case the conspiracy 2.36
-download mod criminal case the conspiracy 2.37
-download mod criminal case the conspiracy 2.38
-download mod criminal case the conspiracy 2.39
-download mod criminal case the conspiracy happymod[^1^]
-download mod criminal case the conspiracy apkdone[^1^]
-download mod criminal case the conspiracy apkhome[^1^]
-download mod criminal case the conspiracy apk4all[^1^]
-download mod criminal case the conspiracy apkpure[^1^]
-download mod criminal case the conspiracy rexdl[^1^]
-download mod criminal case the conspiracy revdl[^1^]
-download mod criminal case the conspiracy an1[^1^]
-download mod criminal case the conspiracy android1[^1^]
-download mod criminal case the conspiracy androidp1[^1^]
-download mod criminal case the conspiracy apkmodhub[^1^]
-download mod criminal case the conspiracy apkmodpro[^1^]
-download mod criminal case the conspiracy apkmodmania[^1^]
-download mod criminal case the conspiracy apkmodone[^1^]
-download mod criminal case the conspiracy apkmodplus[^1^]
-how to download mod criminal case the conspiracy
-where to download mod criminal case the conspiracy
-why to download mod criminal case the conspiracy
-what is mod criminal case the conspiracy
-who made mod criminal case the conspiracy
-features of mod criminal case the conspiracy
-benefits of mod criminal case the conspiracy
-drawbacks of mod criminal case the conspiracy
-reviews of mod criminal case the conspiracy
-ratings of mod criminal case the conspiracy
-gameplay of mod criminal case the conspiracy
-walkthrough of mod criminal case the conspiracy
-tips and tricks for mod criminal case the conspiracy
-cheats and codes for mod criminal case the conspiracy
-guide and tutorial for mod criminal case the conspiracy
-faq and support for mod criminal case the conspiracy
-updates and news for mod criminal case the conspiracy
-alternatives and similar apps to mod criminal case the conspiracy

-

Install the mod apk file and launch the game

-

The final step is to install the mod apk file and launch the game. To do this, locate the downloaded mod apk file on your device storage, tap on it, and follow the instructions on the screen. Once the installation is complete, you can open the game and enjoy playing it with all the premium features and unlimited resources unlocked.

-

Conclusion

-

Summarize the main points of the article

-

In conclusion, Criminal Case: The Conspiracy is a hidden object, adventure game that lets you solve murder cases in a corrupt city. You can download mod criminal case the conspiracy to play this game for free and with unlimited resources. However, you should be aware of the risks and consequences of using mod apks from unknown sources. You should also respect the original developers and support their work by purchasing the official app or game if you like it.

-

Provide a call to action for the readers

-

If you are ready to download mod criminal case the conspiracy and start your thrilling adventure as a detective, you can click on the link below and follow the steps we have explained in this article. But remember, download at your own risk and discretion. Have fun and good luck!

-

Download Mod Criminal Case: The Conspiracy Here

-

FAQs

-

Q: Is Criminal Case: The Conspiracy free to play?

-

A: Yes, Criminal Case: The Conspiracy is free to play on Google Play and App Store. However, some features and items may require in-app purchases or watching ads.

-

Q: How many cases are there in Criminal Case: The Conspiracy?

-

A: There are 60 cases in Criminal Case: The Conspiracy, divided into six districts: Fairview, Money Mile, The Greens, Old Town, Maple Heights, and Misty Grove.

-

Q: How can I get more energy in Criminal Case: The Conspiracy?

-

A: There are several ways to get more energy in Criminal Case: The Conspiracy. You can wait for it to regenerate over time, buy it with coins or cash, watch ads, collect daily bonuses, complete achievements, send and receive energy from your friends, or use boosters.

-

Q: What are the benefits of using a mod apk?

-

A: The benefits of using a mod apk are that you can unlock premium features and unlimited resources that are not available in the original app or game. For example, you can remove ads, increase your energy bar, get unlimited coins and cash, unlock all crime scenes and outfits, etc.

-

Q: What are the risks of using a mod apk?

-

A: The risks of using a mod apk are that you may expose your device and data to malware, viruses, spyware, or other harmful software that can damage or steal them. You may also violate the terms and conditions of the original app or game, which can result in your account being banned or suspended.

197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download E-Aadhaar Online Step by Step Guide.md b/spaces/1phancelerku/anime-remove-background/Download E-Aadhaar Online Step by Step Guide.md
deleted file mode 100644
index d4aaf0fdd7a8acefdf7ed6f907b01086a3532351..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download E-Aadhaar Online Step by Step Guide.md
+++ /dev/null
@@ -1,104 +0,0 @@
-
-
- Steps to download e-Aadhaar using Aadhaar number, enrolment ID or virtual ID
- How to open and print e-Aadhaar PDF file

H2: Benefits of e-Aadhaar
- Validity and acceptance of e-Aadhaar as a proof of identity and address
- Convenience and portability of e-Aadhaar
- Security and privacy of e-Aadhaar

H2: FAQs on e-Aadhaar
- How to update details in e-Aadhaar?
- How to get Aadhaar PVC card?
- How to check Aadhaar authentication history?
- How to lock and unlock Aadhaar biometrics?
- How to verify Aadhaar number and email/mobile number?

Table 2: Article with HTML formatting

What is e-Aadhaar and how to download it online?

-

E-Aadhaar is an electronic version of your Aadhaar card that you can download from the official website of the Unique Identification Authority of India (UIDAI). E-Aadhaar is as valid as the physical copy of Aadhaar and can be used as a proof of identity and address for various purposes. E-Aadhaar contains your name, address, gender, date of birth, photo, biometrics, mobile number, email ID and Aadhaar number.

-

e aadhar download online


Download Ziphttps://jinyurl.com/2uNOuP



-

In this article, we will explain how you can download your e-Aadhaar online using your Aadhaar number, enrolment ID or virtual ID. We will also tell you how to open and print your e-Aadhaar PDF file. Finally, we will discuss the benefits of e-Aadhaar and answer some frequently asked questions on e-Aadhaar.

-

Steps to download e-Aadhaar using Aadhaar number, enrolment ID or virtual ID

-

To download your e-Aadhaar online, you need to follow these steps:

-
    -
  1. Visit the UIDAI website or click on "Download Aadhaar" option from My Aadhaar menu or visit the link.
  2. Select one of the options: Aadhaar number, enrolment ID or virtual ID.
  3. Enter your 12-digit Aadhaar number, 14-digit enrolment ID or 16-digit virtual ID.
  4. Select "Regular Aadhaar" or "Masked Aadhaar". Masked Aadhaar will hide the first 8 digits of your Aadhaar number for extra security.
  5. Enter the captcha verification code and click on "Send OTP" or "Enter a TOTP" if you have the mAadhaar app.
  6. Enter the OTP or TOTP received on your registered mobile number or email ID.
  7. Click on "Verify and Download" to download your e-Aadhaar PDF file.
-
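To make step 4 concrete, here is a small, purely illustrative Python sketch of what the masked option does to the number itself. The `mask_aadhaar` helper is hypothetical and is not part of any UIDAI API; it only mirrors the rule that 8 of the 12 digits are hidden and the last 4 stay visible.

```python
def mask_aadhaar(aadhaar_number: str) -> str:
    """Hypothetical helper: hide the first 8 digits, as a masked e-Aadhaar does."""
    digits = aadhaar_number.replace(" ", "")
    if len(digits) != 12 or not digits.isdigit():
        raise ValueError("An Aadhaar number has exactly 12 digits")
    # Only the last 4 digits remain visible, grouped the usual way.
    return "XXXX XXXX " + digits[-4:]

print(mask_aadhaar("1234 5678 9012"))  # -> XXXX XXXX 9012
```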

How to open and print e-Aadhaar PDF file

-

To open your e-Aadhaar PDF file, you need to enter a password. The password is an 8-character combination of the first four letters of your name (in capital letters) and your year of birth (in YYYY format). For example, if your name is Ravi Kumar and your year of birth is 1990, then your password is RAVI1990.
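As a rough illustration of that rule, the short sketch below builds the password from a name and a year of birth. The `pdf_password` helper is hypothetical and assumes the simple case of a name with at least four letters; it is only meant to make the rule above easier to follow.

```python
def pdf_password(full_name: str, year_of_birth: int) -> str:
    """Hypothetical helper: first four letters of the name in capitals + year of birth (YYYY)."""
    letters = full_name.strip().replace(" ", "")
    return letters[:4].upper() + str(year_of_birth)

print(pdf_password("Ravi Kumar", 1990))  # -> RAVI1990
```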

-

To print your e-Aadhaar PDF file, you need to have a printer connected to your device. You can use the print option in the PDF viewer or press Ctrl+P to print your e-Aadhaar. You can also save your e-Aadhaar PDF file in a pen drive or email it to yourself for future use.

-

How to download e aadhar card by name and date of birth
-E aadhar card download online with mobile number
-E aadhar card download online without otp
-E aadhar card download online pdf password
-E aadhar card download online status check
-E aadhar card download online using enrolment number
-E aadhar card download online kaise kare
-E aadhar card download online tamil nadu
-E aadhar card download online update
-E aadhar card download online print out
-E aadhar card download online correction
-E aadhar card download online apply
-E aadhar card download online verification
-E aadhar card download online link
-E aadhar card download online portal
-E aadhar card download online registration
-E aadhar card download online login
-E aadhar card download online app
-E aadhar card download online free
-E aadhar card download online by fingerprint
-E aadhar card download online by email id
-E aadhar card download online by qr code
-E aadhar card download online by face authentication
-E aadhar card download online by address proof
-E aadhar card download online by pan number
-E aadhar card download online by voter id
-E aadhar card download online by bank account number
-E aadhar card download online by mobile app
-E aadhar card download online by sms
-E aadhar card download online by call center
-E aadhar card download online by post office
-E aadhar card download online by uidai website
-E aadhar card download online by umang app
-E aadhar card download online by digilocker app
-E aadhar card download online by mAadhaar app
-E aadhar card download online by paytm app
-E aadhar card download online by google pay app
-E aadhar card download online by phonepe app
-E aadhar card download online by amazon pay app
-E aadhar card download online by flipkart app
-Benefits of e aadhar card download online
-Steps to e aadhar card download online
-FAQs on e aadhar card download online
-Tips for e aadhar card download online
-Problems with e aadhar card download online
-Solutions for e aadhar card download online
-Reviews of e aadhar card download online
-Features of e aadhar card download online
-Advantages of e aadhar card download online

-

Benefits of e-Aadhaar

-

E-Aadhaar has many benefits over the physical copy of Aadhaar. Some of them are:

- -

FAQs on e-Aadhaar

-

Here are some of the common questions and answers on e-Aadhaar:

-

How to update details in e-Aadhaar?

-

If you want to update your name, address, gender, date of birth, mobile number or email ID in your e-Aadhaar, you can do so online or offline. For online update, you need to visit the UIDAI website or click on "Update your address online" option from My Aadhaar menu or visit the link . You need to login with your Aadhaar number and OTP or TOTP and upload the relevant documents for verification. For offline update, you need to visit an Aadhaar enrolment center and fill an Aadhaar update form and submit the required documents and biometrics.

-

How to get Aadhaar PVC card?

-

Aadhaar PVC card is a durable and convenient form of Aadhaar that you can order online from the UIDAI website or click on "Order Aadhaar PVC Card" option from My Aadhaar menu or visit the link . You need to enter your Aadhaar number, virtual ID or enrolment ID and pay a nominal fee of Rs. 50 (inclusive of GST and speed post charges). You will receive your Aadhaar PVC card within 15 days at your registered address.

-

How to check Aadhaar authentication history?

-

Aadhaar authentication history is a record of the transactions where you have used your Aadhaar for verification. You can check your Aadhaar authentication history online from the UIDAI website or click on "Aadhaar Authentication History" option from My Aadhaar menu or visit the link . You need to enter your Aadhaar number and OTP or TOTP and select the date range and authentication type. You will see the details of the authentication agencies, date, time and status of your Aadhaar authentication transactions.

-

How to lock and unlock Aadhaar biometrics?

-

Aadhaar biometrics are your fingerprints and iris scans that are used for verifying your identity. You can lock and unlock your Aadhaar biometrics online from the UIDAI website or click on "Lock/Unlock Biometrics" option from My Aadhaar menu or visit the link . You need to enter your Aadhaar number and OTP or TOTP and click on "Enable" or "Disable" button. When you lock your biometrics, you cannot use them for authentication until you unlock them. This feature enhances the security of your biometrics.

-

How to verify Aadhaar number and email/mobile number?

-

You can verify your Aadhaar number and email/mobile number online from the UIDAI website or click on "Verify an Aadhaar Number" option from My Aadhaar menu or visit the link. You need to enter the Aadhaar number that you want to verify and click on "Proceed to verify". You will see the details of the Aadhaar holder such as name, gender, age band, state, etc. You can also verify your email/mobile number by clicking on "Verify Email/Mobile Number" option from My Aadhaar menu or visit the link. You need to enter your email/mobile number and OTP or TOTP and click on "Verify". You will see a confirmation message if your email/mobile number is registered with UIDAI.

-

Conclusion

-

E-Aadhaar is a digital form of your Aadhaar card that you can download online from the UIDAI website using your Aadhaar number, enrolment ID or virtual ID. E-Aadhaar is as valid as the physical copy of Aadhaar and can be used as a proof of identity and address for various purposes. E-Aadhaar has many benefits such as validity, convenience, portability, security and privacy. To download your e-Aadhaar online, you need to have a registered mobile number or email ID with UIDAI. If you don't have one, you can update it at an Aadhaar enrolment center. You can also check the status of your Aadhaar generation or update online from the UIDAI website.

-

We hope this article has helped you understand what is e-Aadhaar and how to download it online. If you have any queries or feedback, please feel free to contact us or leave a comment below.

-

FAQs

-

Here are a few more common questions and answers on e-Aadhaar:

-

What is the difference between e-Aadhaar and m-Aadhaar?

-

E-Aadhaar is an electronic version of your Aadhaar card that you can download from the UIDAI website as a PDF file. M-Aadhaar is a mobile app that allows you to access your Aadhaar details on your smartphone. You can also use m-Aadhaar as a proof of identity and address for various purposes. You can download m-Aadhaar from Google Play Store or App Store .

-

Is e-Aadhaar valid for travel?

-

Yes, e-Aadhaar is valid for travel within India. You can use it as a proof of identity and address for domestic flights, trains, buses, etc. However, for international travel, you need to have a passport or other valid documents.

-

How to get e-Aadhaar without mobile number?

-

If you don't have a registered mobile number with UIDAI, you cannot download your e-Aadhaar online. You need to visit an Aadhaar enrolment center and update your mobile number. Alternatively, you can also request for a physical copy of Aadhaar by post from the UIDAI website or click on "Order Aadhaar Reprint" option from My Aadhaar menu or visit the link . You need to pay a fee of Rs. 50 (inclusive of GST and speed post charges) and enter your Aadhaar number and captcha verification code. You will receive your Aadhaar letter within 15 days at your registered address.

-

How to get e-Aadhaar with name and date of birth?

-

If you don't remember your Aadhaar number, enrolment ID or virtual ID, you can still download your e-Aadhaar online using your name and date of birth. You need to visit the UIDAI website or click on "Retrieve Lost or Forgotten EID/UID" option from My Aadhaar menu or visit the link . You need to enter your full name, email ID or mobile number and captcha verification code. You will receive an OTP or TOTP on your registered email ID or mobile number. You need to enter the OTP or TOTP and click on "Verify OTP". You will see your Aadhaar number or enrolment ID on the screen. You can use it to download your e-Aadhaar as explained above.

-

How to get e-Aadhaar with fingerprint?

-

If you want to download your e-Aadhaar using your fingerprint, you need to have a biometric device that is compatible with the UIDAI website. You need to visit the UIDAI website or click on "Download Aadhaar" option from My Aadhaar menu or visit the link . You need to select "Aadhaar Number" option and enter your 12-digit Aadhaar number. You need to select "Regular Aadhaar" or "Masked Aadhaar". You need to enter the captcha verification code and click on "Use Fingerprint". You need to place your finger on the biometric device and scan it. You will receive an OTP on your registered mobile number or email ID. You need to enter the OTP and click on "Verify and Download" to download your e-Aadhaar PDF file.

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Who Wants to Be a Millionaire Game for PC and Win Big Prizes.md b/spaces/1phancelerku/anime-remove-background/Download Who Wants to Be a Millionaire Game for PC and Win Big Prizes.md deleted file mode 100644 index 2ecfd6d781b6a42f94f8cc98cfe07c852557565d..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Who Wants to Be a Millionaire Game for PC and Win Big Prizes.md +++ /dev/null @@ -1,104 +0,0 @@ -
-

How to Download Who Wants to Be a Millionaire Game for PC

-

Who Wants to Be a Millionaire is a popular trivia game show that challenges your general knowledge and rewards you with cash prizes. If you have ever dreamed of being on the show, you can now experience it on your PC with the official game. In this article, we will show you how to download who wants to be a millionaire game for pc, what are the features of the game, and some tips and tricks for playing it.

-

Requirements for Downloading the Game

-

Before you download who wants to be a millionaire game for pc, you need to make sure that your PC meets the minimum system requirements. According to Steam, these are:

-

download who wants to be a millionaire game for pc


Download ★★★ https://jinyurl.com/2uNOYl



- -

If your PC meets these requirements, you can proceed to download who wants to be a millionaire game for pc from one of the platforms below.

-

How to Download the Game from Steam

-

Steam is one of the most popular platforms for buying and playing PC games. It offers a large library of games, frequent sales and discounts, and a user-friendly interface. To download who wants to be a millionaire game for pc from Steam, follow these steps:

-

download who wants to be a millionaire steam game
-who wants to be a millionaire pc game free download
-how to download who wants to be a millionaire on windows 10
-who wants to be a millionaire game download for mac
-download who wants to be a millionaire deluxe edition pc
-who wants to be a millionaire pc game online
-where can I download who wants to be a millionaire game
-download who wants to be a millionaire trivia game for pc
-who wants to be a millionaire pc game full version download
-download who wants to be a millionaire party game for pc
-who wants to be a millionaire pc game crack download
-download who wants to be a millionaire multiplayer game for pc
-who wants to be a millionaire pc game 2020 download
-download who wants to be a millionaire co-op game for pc
-who wants to be a millionaire pc game torrent download
-download who wants to be a millionaire casual game for pc
-who wants to be a millionaire pc game system requirements
-download who wants to be a millionaire family mode game for pc
-who wants to be a millionaire pc game review
-download who wants to be a millionaire battle royale game for pc
-who wants to be a millionaire pc game price
-download who wants to be a millionaire taking turns game for pc
-who wants to be a millionaire pc game steam key
-download who wants to be a millionaire free-for-all game for pc
-who wants to be a millionaire pc game questions and answers
-download who wants to be a millionaire uk edition pc game
-who wants to be a millionaire pc game cheats and tips
-download who wants to be a millionaire usa edition pc game
-who wants to be a millionaire pc game gameplay and features
-download who wants to be a millionaire spain edition pc game
-who wants to be a millionaire pc game ratings and feedbacks
-download who wants to be a millionaire italy edition pc game
-who wants to be a millionaire pc game updates and patches
-download who wants to be a millionaire france edition pc game
-who wants to be a millionaire pc game mods and customizations
-download who wants to be a millionaire germany edition pc game
-who wants to be a millionaire pc game achievements and trophies
-download who wants to be a millionaire neurons pack for pc game
-who wants to be a millionaire pc game soundtracks and voiceovers
-download who wants to be a millionaire deluxe upgrade for pc game

-
  1. Create a Steam account or log in to your existing one. You can do this by visiting https://store.steampowered.com/join/ or by downloading and installing the Steam client on your PC.
  2. Search for who wants to be a millionaire game on the Steam store or click on this link. You will see the game page with its description, screenshots, videos, reviews, and price.
  3. Add the game to your cart and proceed to checkout. You can pay with various methods such as credit card, PayPal, or Steam Wallet. You will also receive an email confirmation of your purchase.
  4. Install the game on your PC and launch it from your Steam library. You can do this by clicking on the "Library" tab on the Steam client and selecting the game from your list of games. You can also create a shortcut on your desktop for easy access.

Congratulations, you have successfully downloaded who wants to be a millionaire game for pc from Steam. You can now enjoy playing the game and testing your knowledge.

-

How to Download the Game from Other Platforms

-

If you prefer to download who wants to be a millionaire game for pc from other platforms, you have some alternatives. Here are some of them:

- -

These are some of the platforms that you can use to download who wants to be a millionaire game for pc. However, be careful when buying from third-party sites and make sure they are trustworthy and secure.

-

Features of the Game

-

Who Wants to Be a Millionaire is not just a simple trivia game. It has many features that make it fun and challenging. Here are some of them:

- -

These are some of the features that make who wants to be a millionaire game for pc an enjoyable and educational experience. You will never get bored or run out of questions with this game.

-

Tips and Tricks for Playing the Game

-

If you want to improve your chances of winning who wants to be a millionaire game for pc, you need to know some tips and tricks that will help you along the way. Here are some of them:

- -

These are some of the tips and tricks that will help you succeed in who wants to be a millionaire game for pc. Remember, the game is not only about luck, but also about skill and knowledge.

-

Conclusion

-

Who Wants to Be a Millionaire is a game that can entertain and educate you at the same time. It is a great way to test your general knowledge and challenge yourself. You can download who wants to be a millionaire game for pc from various platforms, such as Steam, MEmu, G2A, or Green Man Gaming. You can also enjoy the game's features, such as different game modes, countries, questions, neurons, themes, and family mode. And you can use some tips and tricks to improve your performance, such as using lifelines wisely, learning from your mistakes, and practicing and improving your general knowledge.

-

So what are you waiting for? Download who wants to be a millionaire game for pc today and see if you have what it takes to become a millionaire. Good luck and have fun!

-

FAQs

-

Here are some frequently asked questions and answers related to who wants to be a millionaire game for pc:

-
  1. How much does the game cost?
     The game costs $14.99 on Steam, but you may find it cheaper on other platforms or during sales and discounts.
  2. Can I play the game online with other players?
     Yes, you can play the game online with up to 99 other players in the Battle Royale mode. You can also play with up to three friends in the Cooperative or Taking Turns modes.
  3. Can I customize the game settings?
     Yes, you can customize the game settings, such as the language, the sound, the graphics, the controls, and the difficulty.
  4. Can I play the game offline?
     Yes, you can play the game offline in the Free-For-All mode or in the Family mode.
  5. Can I play the game on other devices?
     Yes, you can play the game on other devices, such as Android, iOS, PlayStation 4, Xbox One, and Nintendo Switch.
197e85843d
-
-
\ No newline at end of file diff --git a/spaces/44ov41za8i/FreeVC/speaker_encoder/hparams.py b/spaces/44ov41za8i/FreeVC/speaker_encoder/hparams.py deleted file mode 100644 index 9a8c16471903b0c92253b1d70fcd6a61d10e085f..0000000000000000000000000000000000000000 --- a/spaces/44ov41za8i/FreeVC/speaker_encoder/hparams.py +++ /dev/null @@ -1,31 +0,0 @@ -## Mel-filterbank -mel_window_length = 25 # In milliseconds -mel_window_step = 10 # In milliseconds -mel_n_channels = 40 - - -## Audio -sampling_rate = 16000 -# Number of spectrogram frames in a partial utterance -partials_n_frames = 160 # 1600 ms - - -## Voice Activation Detection -# Window size of the VAD. Must be either 10, 20 or 30 milliseconds. -# This sets the granularity of the VAD. Should not need to be changed. -vad_window_length = 30 # In milliseconds -# Number of frames to average together when performing the moving average smoothing. -# The larger this value, the larger the VAD variations must be to not get smoothed out. -vad_moving_average_width = 8 -# Maximum number of consecutive silent frames a segment can have. -vad_max_silence_length = 6 - - -## Audio volume normalization -audio_norm_target_dBFS = -30 - - -## Model parameters -model_hidden_size = 256 -model_embedding_size = 256 -model_num_layers = 3 \ No newline at end of file diff --git a/spaces/AIConsultant/MusicGen/audiocraft/quantization/__init__.py b/spaces/AIConsultant/MusicGen/audiocraft/quantization/__init__.py deleted file mode 100644 index 1e0c7e429ab96d67be667e23bf7a0ffa389c036b..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/audiocraft/quantization/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-"""RVQ.""" -# flake8: noqa -from .vq import ResidualVectorQuantizer -from .base import BaseQuantizer, DummyQuantizer, QuantizedResult diff --git a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/__init__.py b/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AIWaves/SOP_Generation-single/README copy.md b/spaces/AIWaves/SOP_Generation-single/README copy.md deleted file mode 100644 index 14493cdf96f5cfe177fcb52e293d61c47103ab42..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/SOP_Generation-single/README copy.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: SOP Generatio-single -emoji: 🐨 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ASJMO/freegpt/g4f/Provider/Providers/hteyun.py b/spaces/ASJMO/freegpt/g4f/Provider/Providers/hteyun.py deleted file mode 100644 index a6eba7c00331d720afb47215e818f5900d4aedcf..0000000000000000000000000000000000000000 --- a/spaces/ASJMO/freegpt/g4f/Provider/Providers/hteyun.py +++ /dev/null @@ -1,34 +0,0 @@ -import requests -import os -import json -from ...typing import sha256, Dict, get_type_hints - -url = 'https://hteyun.com' -model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs): - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json, text/plain, */*', - 'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4', - 'Origin': 'https://hteyun.com', - 'Referer': 'https://hteyun.com/chat/', - } - data = { - 'messages': messages, - 'model': model, - 'systemMessage': 'You are ChatGPT, a large language model trained by OpenAI. Follow the user\'s instructions carefully. 
Respond using russian language.', - 'temperature': 0.7, - 'presence_penalty': 0, - } - response = requests.post(url + '/api/chat-stream', json=data, headers=headers, stream=True) - print(response.json()) - - # Извлечение текста из response - return response.json()['text'] - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/trimPrefix.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/trimPrefix.ts deleted file mode 100644 index d006e66deca639f3f4d208e77a64ba368fab00ee..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/trimPrefix.ts +++ /dev/null @@ -1,6 +0,0 @@ -export function trimPrefix(input: string, prefix: string) { - if (input.startsWith(prefix)) { - return input.slice(prefix.length); - } - return input; -} diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/line.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/line.js deleted file mode 100644 index 57e54ba53a414dd1a2538b0abd46c028b81a5309..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/line.js +++ /dev/null @@ -1,2 +0,0 @@ -import Line from './gameobjects/rendertexture/line/Line.js'; -export default Line; \ No newline at end of file diff --git a/spaces/AlexWang/lama/bin/blur_predicts.py b/spaces/AlexWang/lama/bin/blur_predicts.py deleted file mode 100644 index a14fcc28d5a906ad3a21ab4ba482f38b4fc411cb..0000000000000000000000000000000000000000 --- a/spaces/AlexWang/lama/bin/blur_predicts.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 - -import os - -import cv2 -import numpy as np -import tqdm - -from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset -from saicinpainting.evaluation.utils import load_yaml - - -def main(args): - config = load_yaml(args.config) - - if not args.predictdir.endswith('/'): - args.predictdir += '/' - - dataset = PrecomputedInpaintingResultsDataset(args.datadir, args.predictdir, **config.dataset_kwargs) - - os.makedirs(os.path.dirname(args.outpath), exist_ok=True) - - for img_i in tqdm.trange(len(dataset)): - pred_fname = dataset.pred_filenames[img_i] - cur_out_fname = os.path.join(args.outpath, pred_fname[len(args.predictdir):]) - os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True) - - sample = dataset[img_i] - img = sample['image'] - mask = sample['mask'] - inpainted = sample['inpainted'] - - inpainted_blurred = cv2.GaussianBlur(np.transpose(inpainted, (1, 2, 0)), - ksize=(args.k, args.k), - sigmaX=args.s, sigmaY=args.s, - borderType=cv2.BORDER_REFLECT) - - cur_res = (1 - mask) * np.transpose(img, (1, 2, 0)) + mask * inpainted_blurred - cur_res = np.clip(cur_res * 255, 0, 255).astype('uint8') - cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR) - cv2.imwrite(cur_out_fname, cur_res) - - -if __name__ == '__main__': - import argparse - - aparser = argparse.ArgumentParser() - aparser.add_argument('config', type=str, help='Path to evaluation config') - aparser.add_argument('datadir', type=str, - help='Path to folder with images and masks (output of gen_mask_dataset.py)') - aparser.add_argument('predictdir', type=str, - help='Path to folder with predicts (e.g. 
predict_hifill_baseline.py)') - aparser.add_argument('outpath', type=str, help='Where to put results') - aparser.add_argument('-s', type=float, default=0.1, help='Gaussian blur sigma') - aparser.add_argument('-k', type=int, default=5, help='Kernel size in gaussian blur') - - main(aparser.parse_args()) diff --git a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/__init__.py b/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/__init__.py deleted file mode 100644 index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000 --- a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/inpaint.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/inpaint.md deleted file mode 100644 index dc935d0bd17b44f847ce5a77f10537f3a69ae0e1..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/inpaint.md +++ /dev/null @@ -1,57 +0,0 @@ - - -# Inpainting - -The Stable Diffusion model can also be applied to inpainting which lets you edit specific parts of an image by providing a mask and a text prompt using Stable Diffusion. - -## Tips - -It is recommended to use this pipeline with checkpoints that have been specifically fine-tuned for inpainting, such -as [runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting). Default -text-to-image Stable Diffusion checkpoints, such as -[runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) are also compatible but they might be less performant. - - - -Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! - -If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations! 
- - - -## StableDiffusionInpaintPipeline - -[[autodoc]] StableDiffusionInpaintPipeline - - all - - __call__ - - enable_attention_slicing - - disable_attention_slicing - - enable_xformers_memory_efficient_attention - - disable_xformers_memory_efficient_attention - - load_textual_inversion - - load_lora_weights - - save_lora_weights - -## StableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput - -## FlaxStableDiffusionInpaintPipeline - -[[autodoc]] FlaxStableDiffusionInpaintPipeline - - all - - __call__ - -## FlaxStableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/contribute_pipeline.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/contribute_pipeline.md deleted file mode 100644 index 2c2b5abedcec94e65c3d44fc05baf15996e6462c..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/contribute_pipeline.md +++ /dev/null @@ -1,181 +0,0 @@ - - -# How to contribute a community pipeline - - - -💡 Take a look at GitHub Issue [#841](https://github.com/huggingface/diffusers/issues/841) for more context about why we're adding community pipelines to help everyone easily share their work without being slowed down. - - - -Community pipelines allow you to add any additional features you'd like on top of the [`DiffusionPipeline`]. The main benefit of building on top of the `DiffusionPipeline` is anyone can load and use your pipeline by only adding one more argument, making it super easy for the community to access. - -This guide will show you how to create a community pipeline and explain how they work. To keep things simple, you'll create a "one-step" pipeline where the `UNet` does a single forward pass and calls the scheduler once. - -## Initialize the pipeline - -You should start by creating a `one_step_unet.py` file for your community pipeline. In this file, create a pipeline class that inherits from the [`DiffusionPipeline`] to be able to load model weights and the scheduler configuration from the Hub. The one-step pipeline needs a `UNet` and a scheduler, so you'll need to add these as arguments to the `__init__` function: - -```python -from diffusers import DiffusionPipeline -import torch - - -class UnetSchedulerOneForwardPipeline(DiffusionPipeline): - def __init__(self, unet, scheduler): - super().__init__() -``` - -To ensure your pipeline and its components (`unet` and `scheduler`) can be saved with [`~DiffusionPipeline.save_pretrained`], add them to the `register_modules` function: - -```diff - from diffusers import DiffusionPipeline - import torch - - class UnetSchedulerOneForwardPipeline(DiffusionPipeline): - def __init__(self, unet, scheduler): - super().__init__() - -+ self.register_modules(unet=unet, scheduler=scheduler) -``` - -Cool, the `__init__` step is done and you can move to the forward pass now! 🔥 - -## Define the forward pass - -In the forward pass, which we recommend defining as `__call__`, you have complete creative freedom to add whatever feature you'd like. 
For our amazing one-step pipeline, create a random image and only call the `unet` and `scheduler` once by setting `timestep=1`: - -```diff - from diffusers import DiffusionPipeline - import torch - - - class UnetSchedulerOneForwardPipeline(DiffusionPipeline): - def __init__(self, unet, scheduler): - super().__init__() - - self.register_modules(unet=unet, scheduler=scheduler) - -+ def __call__(self): -+ image = torch.randn( -+ (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), -+ ) -+ timestep = 1 - -+ model_output = self.unet(image, timestep).sample -+ scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample - -+ return scheduler_output -``` - -That's it! 🚀 You can now run this pipeline by passing a `unet` and `scheduler` to it: - -```python -from diffusers import DDPMScheduler, UNet2DModel - -scheduler = DDPMScheduler() -unet = UNet2DModel() - -pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler) - -output = pipeline() -``` - -But what's even better is you can load pre-existing weights into the pipeline if the pipeline structure is identical. For example, you can load the [`google/ddpm-cifar10-32`](https://huggingface.co/google/ddpm-cifar10-32) weights into the one-step pipeline: - -```python -pipeline = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32") - -output = pipeline() -``` - -## Share your pipeline - -Open a Pull Request on the 🧨 Diffusers [repository](https://github.com/huggingface/diffusers) to add your awesome pipeline in `one_step_unet.py` to the [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) subfolder. - -Once it is merged, anyone with `diffusers >= 0.4.0` installed can use this pipeline magically 🪄 by specifying it in the `custom_pipeline` argument: - -```python -from diffusers import DiffusionPipeline - -pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet") -pipe() -``` - -Another way to share your community pipeline is to upload the `one_step_unet.py` file directly to your preferred [model repository](https://huggingface.co/docs/hub/models-uploading) on the Hub. Instead of specifying the `one_step_unet.py` file, pass the model repository id to the `custom_pipeline` argument: - -```python -from diffusers import DiffusionPipeline - -pipeline = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="stevhliu/one_step_unet") -``` - -Take a look at the following table to compare the two sharing workflows to help you decide the best option for you: - -| | GitHub community pipeline | HF Hub community pipeline | -|----------------|------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| -| usage | same | same | -| review process | open a Pull Request on GitHub and undergo a review process from the Diffusers team before merging; may be slower | upload directly to a Hub repository without any review; this is the fastest workflow | -| visibility | included in the official Diffusers repository and documentation | included on your HF Hub profile and relies on your own usage/promotion to gain visibility | - - - -💡 You can use whatever package you want in your community pipeline file - as long as the user has it installed, everything will work fine. 
Make sure you have one and only one pipeline class that inherits from `DiffusionPipeline` because this is automatically detected. - - - -## How do community pipelines work? - -A community pipeline is a class that inherits from [`DiffusionPipeline`] which means: - -- It can be loaded with the [`custom_pipeline`] argument. -- The model weights and scheduler configuration are loaded from [`pretrained_model_name_or_path`]. -- The code that implements a feature in the community pipeline is defined in a `pipeline.py` file. - -Sometimes you can't load all the pipeline components weights from an official repository. In this case, the other components should be passed directly to the pipeline: - -```python -from diffusers import DiffusionPipeline -from transformers import CLIPFeatureExtractor, CLIPModel - -model_id = "CompVis/stable-diffusion-v1-4" -clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K" - -feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id) -clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16) - -pipeline = DiffusionPipeline.from_pretrained( - model_id, - custom_pipeline="clip_guided_stable_diffusion", - clip_model=clip_model, - feature_extractor=feature_extractor, - scheduler=scheduler, - torch_dtype=torch.float16, -) -``` - -The magic behind community pipelines is contained in the following code. It allows the community pipeline to be loaded from GitHub or the Hub, and it'll be available to all 🧨 Diffusers packages. - -```python -# 2. Load the pipeline class, if using custom module then load it from the hub -# if we load from explicit class, let's use it -if custom_pipeline is not None: - pipeline_class = get_class_from_dynamic_module( - custom_pipeline, module_file=CUSTOM_PIPELINE_FILE_NAME, cache_dir=custom_pipeline - ) -elif cls != DiffusionPipeline: - pipeline_class = cls -else: - diffusers_module = importlib.import_module(cls.__module__.split(".")[0]) - pipeline_class = getattr(diffusers_module, config_dict["_class_name"]) -``` diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/ghm_loss.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/ghm_loss.py deleted file mode 100644 index 8969a23fd98bb746415f96ac5e4ad9e37ba3af52..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/ghm_loss.py +++ /dev/null @@ -1,172 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES - - -def _expand_onehot_labels(labels, label_weights, label_channels): - bin_labels = labels.new_full((labels.size(0), label_channels), 0) - inds = torch.nonzero( - (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() - if inds.numel() > 0: - bin_labels[inds, labels[inds]] = 1 - bin_label_weights = label_weights.view(-1, 1).expand( - label_weights.size(0), label_channels) - return bin_labels, bin_label_weights - - -# TODO: code refactoring to make it consistent with other losses -@LOSSES.register_module() -class GHMC(nn.Module): - """GHM Classification Loss. - - Details of the theorem can be viewed in the paper - `Gradient Harmonized Single-stage Detector - `_. - - Args: - bins (int): Number of the unit regions for distribution calculation. - momentum (float): The parameter for moving average. - use_sigmoid (bool): Can only be true for BCE based loss now. - loss_weight (float): The weight of the total GHM-C loss. 
- """ - - def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): - super(GHMC, self).__init__() - self.bins = bins - self.momentum = momentum - edges = torch.arange(bins + 1).float() / bins - self.register_buffer('edges', edges) - self.edges[-1] += 1e-6 - if momentum > 0: - acc_sum = torch.zeros(bins) - self.register_buffer('acc_sum', acc_sum) - self.use_sigmoid = use_sigmoid - if not self.use_sigmoid: - raise NotImplementedError - self.loss_weight = loss_weight - - def forward(self, pred, target, label_weight, *args, **kwargs): - """Calculate the GHM-C loss. - - Args: - pred (float tensor of size [batch_num, class_num]): - The direct prediction of classification fc layer. - target (float tensor of size [batch_num, class_num]): - Binary class target for each sample. - label_weight (float tensor of size [batch_num, class_num]): - the value is 1 if the sample is valid and 0 if ignored. - Returns: - The gradient harmonized loss. - """ - # the target should be binary class label - if pred.dim() != target.dim(): - target, label_weight = _expand_onehot_labels( - target, label_weight, pred.size(-1)) - target, label_weight = target.float(), label_weight.float() - edges = self.edges - mmt = self.momentum - weights = torch.zeros_like(pred) - - # gradient length - g = torch.abs(pred.sigmoid().detach() - target) - - valid = label_weight > 0 - tot = max(valid.float().sum().item(), 1.0) - n = 0 # n valid bins - for i in range(self.bins): - inds = (g >= edges[i]) & (g < edges[i + 1]) & valid - num_in_bin = inds.sum().item() - if num_in_bin > 0: - if mmt > 0: - self.acc_sum[i] = mmt * self.acc_sum[i] \ - + (1 - mmt) * num_in_bin - weights[inds] = tot / self.acc_sum[i] - else: - weights[inds] = tot / num_in_bin - n += 1 - if n > 0: - weights = weights / n - - loss = F.binary_cross_entropy_with_logits( - pred, target, weights, reduction='sum') / tot - return loss * self.loss_weight - - -# TODO: code refactoring to make it consistent with other losses -@LOSSES.register_module() -class GHMR(nn.Module): - """GHM Regression Loss. - - Details of the theorem can be viewed in the paper - `Gradient Harmonized Single-stage Detector - `_. - - Args: - mu (float): The parameter for the Authentic Smooth L1 loss. - bins (int): Number of the unit regions for distribution calculation. - momentum (float): The parameter for moving average. - loss_weight (float): The weight of the total GHM-R loss. - """ - - def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0): - super(GHMR, self).__init__() - self.mu = mu - self.bins = bins - edges = torch.arange(bins + 1).float() / bins - self.register_buffer('edges', edges) - self.edges[-1] = 1e3 - self.momentum = momentum - if momentum > 0: - acc_sum = torch.zeros(bins) - self.register_buffer('acc_sum', acc_sum) - self.loss_weight = loss_weight - - # TODO: support reduction parameter - def forward(self, pred, target, label_weight, avg_factor=None): - """Calculate the GHM-R loss. - - Args: - pred (float tensor of size [batch_num, 4 (* class_num)]): - The prediction of box regression layer. Channel number can be 4 - or 4 * class_num depending on whether it is class-agnostic. - target (float tensor of size [batch_num, 4 (* class_num)]): - The target regression values with the same size of pred. - label_weight (float tensor of size [batch_num, 4 (* class_num)]): - The weight of each sample, 0 if ignored. - Returns: - The gradient harmonized loss. 
- """ - mu = self.mu - edges = self.edges - mmt = self.momentum - - # ASL1 loss - diff = pred - target - loss = torch.sqrt(diff * diff + mu * mu) - mu - - # gradient length - g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() - weights = torch.zeros_like(g) - - valid = label_weight > 0 - tot = max(label_weight.float().sum().item(), 1.0) - n = 0 # n: valid bins - for i in range(self.bins): - inds = (g >= edges[i]) & (g < edges[i + 1]) & valid - num_in_bin = inds.sum().item() - if num_in_bin > 0: - n += 1 - if mmt > 0: - self.acc_sum[i] = mmt * self.acc_sum[i] \ - + (1 - mmt) * num_in_bin - weights[inds] = tot / self.acc_sum[i] - else: - weights[inds] = tot / num_in_bin - if n > 0: - weights /= n - - loss = loss * weights - loss = loss.sum() / tot - return loss * self.loss_weight diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/__init__.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/__init__.py deleted file mode 100644 index abfbe2624eecb73b029e9bcb7e2283bbf2a744ea..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from .coarse_mask_head import CoarseMaskHead -from .fcn_mask_head import FCNMaskHead -from .feature_relay_head import FeatureRelayHead -from .fused_semantic_head import FusedSemanticHead -from .global_context_head import GlobalContextHead -from .grid_head import GridHead -from .htc_mask_head import HTCMaskHead -from .mask_point_head import MaskPointHead -from .maskiou_head import MaskIoUHead -from .scnet_mask_head import SCNetMaskHead -from .scnet_semantic_head import SCNetSemanticHead - -__all__ = [ - 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', - 'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead', - 'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead' -] diff --git a/spaces/Apex-X/nono/roop/processors/frame/__init__.py b/spaces/Apex-X/nono/roop/processors/frame/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Atualli/yoloxTeste/yoloxdetect2/configs/__init__.py b/spaces/Atualli/yoloxTeste/yoloxdetect2/configs/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v1_categories.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v1_categories.py deleted file mode 100644 index 7374e6968bb006f5d8c49e75d9d3b31ea3d77d05..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v1_categories.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# Autogen with -# with open("lvis_v1_val.json", "r") as f: -# a = json.load(f) -# c = a["categories"] -# for x in c: -# del x["image_count"] -# del x["instance_count"] -# LVIS_CATEGORIES = repr(c) + " # noqa" -# with open("/tmp/lvis_categories.py", "wt") as f: -# f.write(f"LVIS_CATEGORIES = {LVIS_CATEGORIES}") -# Then paste the contents of that file below - -# fmt: off -LVIS_CATEGORIES = [{'frequency': 'c', 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'id': 1, 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'id': 2, 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'id': 3, 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'f', 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'id': 4, 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'id': 5, 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'c', 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'id': 6, 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'synset': 'almond.n.02', 'synonyms': ['almond'], 'id': 7, 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'id': 8, 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'c', 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'id': 9, 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'id': 10, 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'id': 11, 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'synset': 'apple.n.01', 'synonyms': ['apple'], 'id': 12, 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'id': 13, 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'id': 14, 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'synset': 'apron.n.01', 'synonyms': ['apron'], 'id': 15, 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'id': 16, 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'r', 'synset': 'arctic.n.02', 'synonyms': ['arctic_(type_of_shoe)', 'galosh', 'golosh', 'rubber_(type_of_shoe)', 'gumshoe'], 'id': 17, 'def': 'a waterproof overshoe that protects shoes from water or snow', 'name': 'arctic_(type_of_shoe)'}, {'frequency': 'c', 'synset': 
'armband.n.02', 'synonyms': ['armband'], 'id': 18, 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'id': 19, 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'id': 20, 'def': 'a large wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'id': 21, 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'id': 22, 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'id': 23, 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'id': 24, 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'id': 25, 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'id': 26, 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'f', 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'id': 27, 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'id': 28, 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'synset': 'awning.n.01', 'synonyms': ['awning'], 'id': 29, 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'id': 30, 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'r', 'synset': 'baboon.n.01', 'synonyms': ['baboon'], 'id': 31, 'def': 'large terrestrial monkeys having doglike muzzles', 'name': 'baboon'}, {'frequency': 'f', 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'id': 32, 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'id': 33, 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'id': 34, 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'id': 35, 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'id': 36, 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'id': 
37, 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'id': 38, 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'id': 39, 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'id': 40, 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'synset': 'ball.n.06', 'synonyms': ['ball'], 'id': 41, 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'id': 42, 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'id': 43, 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'id': 44, 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'synset': 'banana.n.02', 'synonyms': ['banana'], 'id': 45, 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'c', 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'id': 46, 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'id': 47, 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'f', 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'id': 48, 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'id': 49, 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'id': 50, 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'id': 51, 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'synset': 'barge.n.01', 'synonyms': ['barge'], 'id': 52, 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'id': 53, 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'id': 54, 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'id': 55, 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'id': 56, 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'id': 57, 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 
'f', 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'id': 58, 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'id': 59, 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'id': 60, 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'id': 61, 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'id': 62, 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'id': 63, 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'c', 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'id': 64, 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'id': 65, 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'id': 66, 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'id': 67, 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'id': 68, 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'id': 69, 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'synset': 'battery.n.02', 'synonyms': ['battery'], 'id': 70, 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'id': 71, 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'synset': 'bead.n.01', 'synonyms': ['bead'], 'id': 72, 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'c', 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'id': 73, 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'id': 74, 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'id': 75, 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'synset': 'bear.n.01', 'synonyms': ['bear'], 'id': 76, 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'synset': 'bed.n.01', 'synonyms': ['bed'], 'id': 77, 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'r', 'synset': 'bedpan.n.01', 'synonyms': 
['bedpan'], 'id': 78, 'def': 'a shallow vessel used by a bedridden patient for defecation and urination', 'name': 'bedpan'}, {'frequency': 'f', 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'id': 79, 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'synset': 'beef.n.01', 'synonyms': ['cow'], 'id': 80, 'def': 'cattle/cow', 'name': 'cow'}, {'frequency': 'f', 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'id': 81, 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'id': 82, 'def': 'a device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'id': 83, 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'id': 84, 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'id': 85, 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'synset': 'bell.n.01', 'synonyms': ['bell'], 'id': 86, 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'id': 87, 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'synset': 'belt.n.02', 'synonyms': ['belt'], 'id': 88, 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'id': 89, 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'synset': 'bench.n.01', 'synonyms': ['bench'], 'id': 90, 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'synset': 'beret.n.01', 'synonyms': ['beret'], 'id': 91, 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'synset': 'bib.n.02', 'synonyms': ['bib'], 'id': 92, 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'id': 93, 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'id': 94, 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'id': 95, 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'f', 'synset': 'billboard.n.01', 'synonyms': ['billboard'], 'id': 96, 'def': 'large outdoor signboard', 'name': 'billboard'}, {'frequency': 'c', 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'id': 97, 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'id': 98, 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'synset': 'bird.n.01', 'synonyms': ['bird'], 'id': 99, 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'c', 'synset': 
'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'id': 100, 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'c', 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'id': 101, 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'id': 102, 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'id': 103, 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'id': 104, 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'id': 105, 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'id': 106, 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'id': 107, 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'synset': 'blackberry.n.01', 'synonyms': ['blackberry'], 'id': 108, 'def': 'large sweet black or very dark purple edible aggregate fruit', 'name': 'blackberry'}, {'frequency': 'f', 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'id': 109, 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'id': 110, 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'id': 111, 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'id': 112, 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'id': 113, 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'f', 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'id': 114, 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'f', 'synset': 'blouse.n.01', 'synonyms': ['blouse'], 'id': 115, 'def': 'a top worn by women', 'name': 'blouse'}, {'frequency': 'f', 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'id': 116, 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'id': 117, 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'id': 118, 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'r', 'synset': 'bob.n.05', 'synonyms': ['bob', 'bobber', 'bobfloat'], 'id': 119, 'def': 'a small float usually made of cork; attached to a fishing line', 'name': 'bob'}, {'frequency': 'c', 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'id': 120, 'def': 'a thing around which thread/tape/film 
or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'c', 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'id': 121, 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'id': 122, 'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'id': 123, 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'id': 124, 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'id': 125, 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'id': 126, 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'synset': 'book.n.01', 'synonyms': ['book'], 'id': 127, 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'c', 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'id': 128, 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'id': 129, 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'id': 130, 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'id': 131, 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'synset': 'boot.n.01', 'synonyms': ['boot'], 'id': 132, 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'id': 133, 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'id': 134, 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'id': 135, 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'id': 136, 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'id': 137, 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'id': 138, 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'id': 139, 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'id': 140, 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 
'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'id': 141, 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'id': 142, 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'f', 'synset': 'box.n.01', 'synonyms': ['box'], 'id': 143, 'def': 'a (usually rectangular) container; may have a lid', 'name': 'box'}, {'frequency': 'r', 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'id': 144, 'def': 'large glove covering the fists of a fighter; worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'id': 145, 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'id': 146, 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'id': 147, 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'id': 148, 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'id': 149, 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'f', 'synset': 'bread.n.01', 'synonyms': ['bread'], 'id': 150, 'def': 'food made from dough of flour or meal and usually raised with yeast or baking powder and then baked', 'name': 'bread'}, {'frequency': 'r', 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'id': 151, 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'f', 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'id': 152, 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'id': 153, 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'f', 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'id': 154, 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'id': 155, 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'synset': 'broom.n.01', 'synonyms': ['broom'], 'id': 156, 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'id': 157, 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'id': 158, 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'id': 159, 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'id': 160, 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, 
{'frequency': 'r', 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'id': 161, 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'synset': 'bull.n.11', 'synonyms': ['horned_cow'], 'id': 162, 'def': 'a cow with horns', 'name': 'bull'}, {'frequency': 'c', 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'id': 163, 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'id': 164, 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'id': 165, 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'id': 166, 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'id': 167, 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'id': 168, 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'f', 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'id': 169, 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'id': 170, 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'id': 171, 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'id': 172, 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'id': 173, 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'id': 174, 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'f', 'synset': 'butter.n.01', 'synonyms': ['butter'], 'id': 175, 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'id': 176, 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'synset': 'button.n.01', 'synonyms': ['button'], 'id': 177, 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'id': 178, 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'id': 179, 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'c', 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 
'id': 180, 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'id': 181, 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'id': 182, 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'synset': 'cake.n.03', 'synonyms': ['cake'], 'id': 183, 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'id': 184, 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'id': 185, 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'synset': 'calf.n.01', 'synonyms': ['calf'], 'id': 186, 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'id': 187, 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'synset': 'camel.n.01', 'synonyms': ['camel'], 'id': 188, 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'synset': 'camera.n.01', 'synonyms': ['camera'], 'id': 189, 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'id': 190, 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'id': 191, 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'id': 192, 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'id': 193, 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'f', 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'id': 194, 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'id': 195, 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'id': 196, 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'id': 197, 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'id': 198, 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'id': 199, 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'c', 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'id': 200, 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 
'name': 'canoe'}, {'frequency': 'c', 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'id': 201, 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'id': 202, 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'f', 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'id': 203, 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'id': 204, 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'c', 'synset': 'cape.n.02', 'synonyms': ['cape'], 'id': 205, 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'id': 206, 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'id': 207, 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'id': 208, 'def': 'a wheeled vehicle adapted to the rails of railroad (mark each individual railcar separately)', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'id': 209, 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'id': 210, 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'id': 211, 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'synset': 'card.n.03', 'synonyms': ['card'], 'id': 212, 'def': 'a rectangular piece of paper used to send messages (e.g. 
greetings or pictures)', 'name': 'card'}, {'frequency': 'c', 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'id': 213, 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'id': 214, 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'id': 215, 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'id': 216, 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'id': 217, 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'f', 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'id': 218, 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'synset': 'cart.n.01', 'synonyms': ['cart'], 'id': 219, 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'synset': 'carton.n.02', 'synonyms': ['carton'], 'id': 220, 'def': 'a container made of cardboard for holding food or drink', 'name': 'carton'}, {'frequency': 'c', 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'id': 221, 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'id': 222, 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'id': 223, 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'id': 224, 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'synset': 'cat.n.01', 'synonyms': ['cat'], 'id': 225, 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'f', 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'id': 226, 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'c', 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'id': 227, 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'id': 228, 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'f', 'synset': 'celery.n.01', 'synonyms': ['celery'], 'id': 229, 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'id': 230, 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'synset': 'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'id': 231, 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 
'chain_mail'}, {'frequency': 'f', 'synset': 'chair.n.01', 'synonyms': ['chair'], 'id': 232, 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'id': 233, 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'synset': 'chalice.n.01', 'synonyms': ['chalice'], 'id': 234, 'def': 'a bowl-shaped drinking vessel; especially the Eucharistic cup', 'name': 'chalice'}, {'frequency': 'f', 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'id': 235, 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'synset': 'chap.n.04', 'synonyms': ['chap'], 'id': 236, 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'id': 237, 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'id': 238, 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'id': 239, 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'id': 240, 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'c', 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'id': 241, 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'id': 242, 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'c', 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'id': 243, 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'id': 244, 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'id': 245, 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'id': 246, 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, {'frequency': 'r', 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'id': 247, 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'id': 248, 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'id': 249, 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'id': 250, 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'id': 251, 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 
'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'id': 252, 'def': 'shirt collar, animal collar, or tight-fitting necklace', 'name': 'choker'}, {'frequency': 'f', 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'id': 253, 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'f', 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'id': 254, 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'id': 255, 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'synset': 'chute.n.02', 'synonyms': ['slide'], 'id': 256, 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'id': 257, 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'id': 258, 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'f', 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'id': 259, 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'id': 260, 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'id': 261, 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'id': 262, 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'c', 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'id': 263, 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'id': 264, 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'synset': 'cleat.n.02', 'synonyms': ['cleat_(for_securing_rope)'], 'id': 265, 'def': 'a fastener (usually with two projecting horns) around which a rope can be secured', 'name': 'cleat_(for_securing_rope)'}, {'frequency': 'r', 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'id': 266, 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'synset': 'clip.n.03', 'synonyms': ['clip'], 'id': 267, 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'id': 268, 'def': 'a small writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'r', 'synset': 'clipper.n.03', 'synonyms': ['clippers_(for_plants)'], 'id': 269, 'def': 'shears for cutting grass or shrubbery (often used in the plural)', 'name': 'clippers_(for_plants)'}, {'frequency': 'r', 'synset': 'cloak.n.02', 'synonyms': ['cloak'], 'id': 270, 'def': 'a loose outer garment', 'name': 'cloak'}, {'frequency': 'f', 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'id': 271, 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'synset': 
'clock_tower.n.01', 'synonyms': ['clock_tower'], 'id': 272, 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'id': 273, 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'id': 274, 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'id': 275, 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'id': 276, 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'synset': 'coat.n.01', 'synonyms': ['coat'], 'id': 277, 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'id': 278, 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'c', 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'id': 279, 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'id': 280, 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'r', 'synset': 'cockroach.n.01', 'synonyms': ['cockroach'], 'id': 281, 'def': 'any of numerous chiefly nocturnal insects; some are domestic pests', 'name': 'cockroach'}, {'frequency': 'r', 'synset': 'cocoa.n.01', 'synonyms': ['cocoa_(beverage)', 'hot_chocolate_(beverage)', 'drinking_chocolate'], 'id': 282, 'def': 'a beverage made from cocoa powder and milk and sugar; usually drunk hot', 'name': 'cocoa_(beverage)'}, {'frequency': 'c', 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'id': 283, 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'f', 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'id': 284, 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'id': 285, 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'id': 286, 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'synset': 'coil.n.05', 'synonyms': ['coil'], 'id': 287, 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'synset': 'coin.n.01', 'synonyms': ['coin'], 'id': 288, 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'c', 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'id': 289, 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'id': 290, 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'synset': 'coloring_material.n.01', 'synonyms': ['coloring_material', 'colouring_material'], 'id': 291, 
'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'id': 292, 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'id': 293, 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'id': 294, 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'r', 'synset': 'compass.n.01', 'synonyms': ['compass'], 'id': 295, 'def': 'navigational instrument for finding directions', 'name': 'compass'}, {'frequency': 'f', 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'id': 296, 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'f', 'synset': 'condiment.n.01', 'synonyms': ['condiment'], 'id': 297, 'def': 'a preparation (a sauce or relish or spice) to enhance flavor or enjoyment', 'name': 'condiment'}, {'frequency': 'f', 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'id': 298, 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'id': 299, 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'id': 300, 'def': 'a car that has a top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'id': 301, 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'r', 'synset': 'cooker.n.01', 'synonyms': ['cooker'], 'id': 302, 'def': 'a utensil for cooking', 'name': 'cooker'}, {'frequency': 'f', 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'id': 303, 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'id': 304, 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'id': 305, 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'f', 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'id': 306, 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'id': 307, 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'c', 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'id': 308, 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'f', 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'id': 309, 'def': 'ears or kernels of corn that can be prepared and served for human food (only mark individual ears or kernels)', 'name': 'edible_corn'}, {'frequency': 'r', 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'id': 310, 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 
'synset': 'cornet.n.01', 'synonyms': ['cornet', 'horn', 'trumpet'], 'id': 311, 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'id': 312, 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'id': 313, 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'c', 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'id': 314, 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'c', 'synset': 'costume.n.04', 'synonyms': ['costume'], 'id': 315, 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'id': 316, 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'id': 317, 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'c', 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'id': 318, 'def': 'a bell hung around the neck of a cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'id': 319, 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'c', 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'id': 320, 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'r', 'synset': 'crab.n.05', 'synonyms': ['crabmeat'], 'id': 321, 'def': 'the edible flesh of any of various crabs', 'name': 'crabmeat'}, {'frequency': 'c', 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'id': 322, 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'id': 323, 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'synset': 'crate.n.01', 'synonyms': ['crate'], 'id': 324, 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'c', 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'id': 325, 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'id': 326, 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'c', 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'id': 327, 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'id': 328, 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'id': 329, 'def': 'an earthen jar (made of baked clay) or a modern electric crockpot', 'name': 'crock_pot'}, {'frequency': 'f', 'synset': 'crossbar.n.01', 'synonyms': ['crossbar'], 'id': 330, 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'synset': 'crouton.n.01', 'synonyms': 
['crouton'], 'id': 331, 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'c', 'synset': 'crow.n.01', 'synonyms': ['crow'], 'id': 332, 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'r', 'synset': 'crowbar.n.01', 'synonyms': ['crowbar', 'wrecking_bar', 'pry_bar'], 'id': 333, 'def': 'a heavy iron lever with one end forged into a wedge', 'name': 'crowbar'}, {'frequency': 'c', 'synset': 'crown.n.04', 'synonyms': ['crown'], 'id': 334, 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'id': 335, 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'id': 336, 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'id': 337, 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'f', 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'id': 338, 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'c', 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'id': 339, 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'id': 340, 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'c', 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'id': 341, 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'id': 342, 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'id': 343, 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'synset': 'cup.n.01', 'synonyms': ['cup'], 'id': 344, 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'id': 345, 'def': 'a metal award or cup-shaped vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'f', 'synset': 'cupboard.n.01', 'synonyms': ['cupboard', 'closet'], 'id': 346, 'def': 'a small room (or recess) or cabinet used for storage space', 'name': 'cupboard'}, {'frequency': 'f', 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'id': 347, 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'id': 348, 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'id': 349, 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'id': 350, 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, 
{'frequency': 'f', 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'id': 351, 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'id': 352, 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'id': 353, 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'id': 354, 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'synset': 'dalmatian.n.02', 'synonyms': ['dalmatian'], 'id': 355, 'def': 'a large breed having a smooth white coat with black or brown spots', 'name': 'dalmatian'}, {'frequency': 'c', 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'id': 356, 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'id': 357, 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'id': 358, 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'id': 359, 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'id': 360, 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'synset': 'desk.n.01', 'synonyms': ['desk'], 'id': 361, 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'id': 362, 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'id': 363, 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'id': 364, 'def': 'yearly planner book', 'name': 'diary'}, {'frequency': 'r', 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'id': 365, 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'id': 366, 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'id': 367, 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'id': 368, 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'f', 'synset': 'dish.n.01', 'synonyms': ['dish'], 'id': 369, 'def': 'a piece of dishware normally used as a container for holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'id': 370, 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, 
{'frequency': 'c', 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'id': 371, 'def': 'a cloth for washing dishes or cleaning in general', 'name': 'dishrag'}, {'frequency': 'f', 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'id': 372, 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'id': 373, 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid', 'dishsoap'], 'id': 374, 'def': 'dishsoap or dish detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'f', 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'id': 375, 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'r', 'synset': 'diving_board.n.01', 'synonyms': ['diving_board'], 'id': 376, 'def': 'a springboard from which swimmers can dive', 'name': 'diving_board'}, {'frequency': 'f', 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'id': 377, 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'synset': 'dog.n.01', 'synonyms': ['dog'], 'id': 378, 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'id': 379, 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'f', 'synset': 'doll.n.01', 'synonyms': ['doll'], 'id': 380, 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'id': 381, 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'synset': 'dollhouse.n.01', 'synonyms': ['dollhouse', "doll's_house"], 'id': 382, 'def': "a house so small that it is likened to a child's plaything", 'name': 'dollhouse'}, {'frequency': 'c', 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'id': 383, 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'id': 384, 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'f', 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'id': 385, 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'id': 386, 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'id': 387, 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'synset': 'dove.n.01', 'synonyms': ['dove'], 'id': 388, 'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'id': 389, 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'id': 390, 'def': 'a boxlike container in a piece of furniture; made so as to slide in and 
out', 'name': 'drawer'}, {'frequency': 'c', 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'id': 391, 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'id': 392, 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'id': 393, 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'f', 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'id': 394, 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'f', 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'id': 395, 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'synset': 'drill.n.01', 'synonyms': ['drill'], 'id': 396, 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'synset': 'drone.n.04', 'synonyms': ['drone'], 'id': 397, 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'id': 398, 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'id': 399, 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'id': 400, 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'synset': 'duck.n.01', 'synonyms': ['duck'], 'id': 401, 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'c', 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'id': 402, 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'id': 403, 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'id': 404, 'def': 'a large cylindrical bag of heavy cloth (does not include suitcases)', 'name': 'duffel_bag'}, {'frequency': 'r', 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'id': 405, 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'id': 406, 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'synset': 'dustpan.n.02', 'synonyms': ['dustpan'], 'id': 407, 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'c', 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'id': 408, 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'id': 409, 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'id': 410, 'def': 'a 
soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'synset': 'earring.n.01', 'synonyms': ['earring'], 'id': 411, 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'synset': 'easel.n.01', 'synonyms': ['easel'], 'id': 412, 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'id': 413, 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'synset': 'eel.n.01', 'synonyms': ['eel'], 'id': 414, 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'id': 415, 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'id': 416, 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'id': 417, 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'id': 418, 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'id': 419, 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'id': 420, 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'id': 421, 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'id': 422, 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'c', 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'id': 423, 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'id': 424, 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'id': 425, 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'id': 426, 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'id': 427, 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'id': 428, 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'synset': 'fan.n.01', 'synonyms': ['fan'], 'id': 429, 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'id': 430, 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'id': 
431, 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'id': 432, 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'id': 433, 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'c', 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'id': 434, 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'id': 435, 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'id': 436, 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'id': 437, 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'id': 438, 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'id': 439, 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'id': 440, 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'f', 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'id': 441, 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'f', 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'id': 442, 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'id': 443, 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'id': 444, 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 'hydrant'], 'id': 445, 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'r', 'synset': 'first-aid_kit.n.01', 'synonyms': ['first-aid_kit'], 'id': 446, 'def': 'kit consisting of a set of bandages and medicines for giving first aid', 'name': 'first-aid_kit'}, {'frequency': 'f', 'synset': 'fish.n.01', 'synonyms': ['fish'], 'id': 447, 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'c', 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'id': 448, 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'id': 449, 'def': 'a 
transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'c', 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'id': 450, 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'synset': 'flag.n.01', 'synonyms': ['flag'], 'id': 451, 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'id': 452, 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'id': 453, 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'id': 454, 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'c', 'synset': 'flap.n.01', 'synonyms': ['flap'], 'id': 455, 'def': 'any broad thin covering attached at one edge, such as a mud flap next to a wheel or a flap on an airplane wing', 'name': 'flap'}, {'frequency': 'r', 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'id': 456, 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'id': 457, 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'id': 458, 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'id': 459, 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'id': 460, 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'id': 461, 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'id': 462, 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'c', 'synset': 'foal.n.01', 'synonyms': ['foal'], 'id': 463, 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'id': 464, 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'id': 465, 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 'id': 466, 'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'id': 467, 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'id': 468, 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'synset': 
'fork.n.01', 'synonyms': ['fork'], 'id': 469, 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'c', 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'id': 470, 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'c', 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'id': 471, 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'c', 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'id': 472, 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'id': 473, 'def': 'anything that freshens air by removing or covering odor', 'name': 'freshener'}, {'frequency': 'f', 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'id': 474, 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'id': 475, 'def': 'a tailless stout-bodied amphibian with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'id': 476, 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'f', 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'id': 477, 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'id': 478, 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'id': 479, 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'r', 'synset': 'futon.n.01', 'synonyms': ['futon'], 'id': 480, 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'id': 481, 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'id': 482, 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'id': 483, 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'id': 484, 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'id': 485, 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 'gargle'}, {'frequency': 'r', 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'id': 486, 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'id': 487, 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'id': 488, 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'c', 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'id': 489, 'def': 'small swift graceful antelope of 
Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'id': 490, 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'id': 491, 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'r', 'synset': 'generator.n.02', 'synonyms': ['generator'], 'id': 492, 'def': 'engine that converts mechanical energy into electrical energy by electromagnetic induction', 'name': 'generator'}, {'frequency': 'c', 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'id': 493, 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'id': 494, 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'id': 495, 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'id': 496, 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'id': 497, 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'id': 498, 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'synset': 'globe.n.03', 'synonyms': ['globe'], 'id': 499, 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'synset': 'glove.n.02', 'synonyms': ['glove'], 'id': 500, 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'synset': 'goat.n.01', 'synonyms': ['goat'], 'id': 501, 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'id': 502, 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'id': 503, 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'c', 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'id': 504, 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'id': 505, 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'id': 506, 'def': 'long narrow flat-bottomed boat propelled by sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'synset': 'goose.n.01', 'synonyms': ['goose'], 'id': 507, 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'id': 508, 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 
'id': 509, 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'f', 'synset': 'grape.n.01', 'synonyms': ['grape'], 'id': 510, 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'c', 'synset': 'grater.n.01', 'synonyms': ['grater'], 'id': 511, 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'id': 512, 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'id': 513, 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'f', 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'id': 514, 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'f', 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'id': 515, 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'id': 516, 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'f', 'synset': 'grill.n.02', 'synonyms': ['grill', 'grille', 'grillwork', 'radiator_grille'], 'id': 517, 'def': 'a framework of metal bars used as a partition or a grate', 'name': 'grill'}, {'frequency': 'r', 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'id': 518, 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'id': 519, 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'id': 520, 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'f', 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'id': 521, 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 'id': 522, 'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'synset': 'gun.n.01', 'synonyms': ['gun'], 'id': 523, 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'f', 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'id': 524, 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'id': 525, 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'id': 526, 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'r', 'synset': 'halter.n.03', 'synonyms': ['halter_top'], 'id': 527, 'def': "a woman's top that fastens behind the back and neck leaving the back and arms uncovered", 'name': 'halter_top'}, {'frequency': 'f', 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'id': 528, 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 
'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'id': 529, 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'id': 530, 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'c', 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'id': 531, 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'id': 532, 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'c', 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'id': 533, 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'f', 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'id': 534, 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'id': 535, 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'id': 536, 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'id': 537, 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'id': 538, 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'id': 539, 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'id': 540, 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'id': 541, 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'synset': 'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'id': 542, 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'id': 543, 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'synset': 'hat.n.01', 'synonyms': ['hat'], 'id': 544, 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'id': 545, 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'c', 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'id': 546, 'def': 'a garment that covers the head OR face', 'name': 'veil'}, {'frequency': 'f', 'synset': 'headband.n.01', 'synonyms': ['headband'], 'id': 547, 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'id': 548, 'def': 'a 
vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'id': 549, 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'id': 550, 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'synset': 'headset.n.01', 'synonyms': ['headset'], 'id': 551, 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'id': 552, 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'c', 'synset': 'heart.n.02', 'synonyms': ['heart'], 'id': 553, 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'id': 554, 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'id': 555, 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'id': 556, 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'synset': 'heron.n.02', 'synonyms': ['heron'], 'id': 557, 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'id': 558, 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'id': 559, 'def': 'a joint that holds two parts together so that one can swing relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'id': 560, 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'id': 561, 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'id': 562, 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'id': 563, 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'synset': 'honey.n.01', 'synonyms': ['honey'], 'id': 564, 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'id': 565, 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'synset': 'hook.n.05', 'synonyms': ['hook'], 'id': 566, 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'r', 'synset': 'hookah.n.01', 'synonyms': ['hookah', 'narghile', 'nargileh', 'sheesha', 'shisha', 'water_pipe'], 'id': 567, 
'def': 'a tobacco pipe with a long flexible tube connected to a container where the smoke is cooled by passing through water', 'name': 'hookah'}, {'frequency': 'r', 'synset': 'hornet.n.01', 'synonyms': ['hornet'], 'id': 568, 'def': 'large stinging wasp', 'name': 'hornet'}, {'frequency': 'f', 'synset': 'horse.n.01', 'synonyms': ['horse'], 'id': 569, 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'id': 570, 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'id': 571, 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'id': 572, 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'id': 573, 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'id': 574, 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'id': 575, 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'c', 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'id': 576, 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'id': 577, 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'f', 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'id': 578, 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'id': 579, 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'id': 580, 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'id': 581, 'def': 'an appliance included in some electric refrigerators for making ice cubes', 'name': 'ice_maker'}, {'frequency': 'r', 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'id': 582, 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'id': 583, 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'c', 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'id': 584, 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'id': 585, 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'f', 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'id': 586, 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 
'smoothing_iron_(for_clothing)'], 'id': 587, 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'c', 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'id': 588, 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'id': 589, 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'c', 'synset': 'jam.n.01', 'synonyms': ['jam'], 'id': 590, 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'synset': 'jar.n.01', 'synonyms': ['jar'], 'id': 591, 'def': 'a vessel (usually cylindrical) with a wide mouth and without handles', 'name': 'jar'}, {'frequency': 'f', 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'id': 592, 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'id': 593, 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'id': 594, 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'id': 595, 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'id': 596, 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'r', 'synset': 'jewel.n.01', 'synonyms': ['jewel', 'gem', 'precious_stone'], 'id': 597, 'def': 'a precious or semiprecious stone incorporated into a piece of jewelry', 'name': 'jewel'}, {'frequency': 'c', 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'id': 598, 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'id': 599, 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'c', 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'id': 600, 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'synset': 'kayak.n.01', 'synonyms': ['kayak'], 'id': 601, 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'synset': 'keg.n.02', 'synonyms': ['keg'], 'id': 602, 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'id': 603, 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'id': 604, 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'synset': 'key.n.01', 'synonyms': ['key'], 'id': 605, 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'id': 606, 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'c', 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'id': 607, 'def': 'a knee-length pleated tartan 
skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'id': 608, 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'id': 609, 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'r', 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'id': 610, 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'synset': 'kite.n.03', 'synonyms': ['kite'], 'id': 611, 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'id': 612, 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'id': 613, 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'id': 614, 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'synset': 'knife.n.01', 'synonyms': ['knife'], 'id': 615, 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'id': 616, 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'synset': 'knob.n.02', 'synonyms': ['knob'], 'id': 617, 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'id': 618, 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'id': 619, 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'id': 620, 'def': 'a light coat worn to protect clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'id': 621, 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'id': 622, 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'c', 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'id': 623, 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'f', 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'id': 624, 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'id': 625, 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'id': 626, 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'id': 627, 'def': 'a metal post supporting 
an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'id': 628, 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'id': 629, 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'id': 630, 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'id': 631, 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'id': 632, 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'f', 'synset': 'latch.n.02', 'synonyms': ['latch'], 'id': 633, 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'id': 634, 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'synset': 'leather.n.01', 'synonyms': ['leather'], 'id': 635, 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'id': 636, 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'id': 637, 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'r', 'synset': 'legume.n.02', 'synonyms': ['legume'], 'id': 638, 'def': 'the fruit or seed of bean or pea plants', 'name': 'legume'}, {'frequency': 'f', 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'id': 639, 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'id': 640, 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'id': 641, 'def': 'leafy plant commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'id': 642, 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'id': 643, 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'id': 644, 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'id': 645, 'def': 'lightbulb/source of light', 'name': 'lightbulb'}, {'frequency': 'r', 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'id': 646, 'def': 'a metallic conductor that is attached to a 
high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'f', 'synset': 'lime.n.06', 'synonyms': ['lime'], 'id': 647, 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'id': 648, 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'c', 'synset': 'lion.n.01', 'synonyms': ['lion'], 'id': 649, 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'id': 650, 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'r', 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'id': 651, 'def': 'liquor or beer', 'name': 'liquor'}, {'frequency': 'c', 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'id': 652, 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'f', 'synset': 'log.n.01', 'synonyms': ['log'], 'id': 653, 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'id': 654, 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'f', 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'id': 655, 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'id': 656, 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 'r', 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'id': 657, 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'id': 658, 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'id': 659, 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'c', 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'id': 660, 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'f', 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'id': 661, 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'synset': 'mallard.n.01', 'synonyms': ['mallard'], 'id': 662, 'def': 'wild dabbling duck from which domestic ducks are descended', 'name': 'mallard'}, {'frequency': 'r', 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'id': 663, 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'id': 664, 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'r', 'synset': 'manatee.n.01', 'synonyms': ['manatee'], 'id': 665, 'def': 'sirenian mammal of tropical coastal waters of America', 'name': 'manatee'}, {'frequency': 'c', 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'id': 666, 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'id': 667, 'def': 'a container (usually in a barn or stable) 
from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'id': 668, 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'f', 'synset': 'map.n.01', 'synonyms': ['map'], 'id': 669, 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'f', 'synset': 'marker.n.03', 'synonyms': ['marker'], 'id': 670, 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'synset': 'martini.n.01', 'synonyms': ['martini'], 'id': 671, 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'id': 672, 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'id': 673, 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'synset': 'masher.n.02', 'synonyms': ['masher'], 'id': 674, 'def': 'a kitchen utensil used for mashing (e.g. potatoes)', 'name': 'masher'}, {'frequency': 'f', 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'id': 675, 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'synset': 'mast.n.01', 'synonyms': ['mast'], 'id': 676, 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'id': 677, 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'id': 678, 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'id': 679, 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'id': 680, 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'id': 681, 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'id': 682, 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'id': 683, 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'c', 'synset': 'melon.n.01', 'synonyms': ['melon'], 'id': 684, 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'id': 685, 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'id': 686, 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'id': 687, 'def': 'kitchen appliance that 
cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'id': 688, 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'f', 'synset': 'milk.n.01', 'synonyms': ['milk'], 'id': 689, 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'r', 'synset': 'milk_can.n.01', 'synonyms': ['milk_can'], 'id': 690, 'def': 'can for transporting milk', 'name': 'milk_can'}, {'frequency': 'r', 'synset': 'milkshake.n.01', 'synonyms': ['milkshake'], 'id': 691, 'def': 'frothy drink of milk and flavoring and sometimes fruit or ice cream', 'name': 'milkshake'}, {'frequency': 'f', 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'id': 692, 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'id': 693, 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'id': 694, 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'id': 695, 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'id': 696, 'def': 'a kitchen utensil that is used for mixing foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'synset': 'money.n.03', 'synonyms': ['money'], 'id': 697, 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'id': 698, 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'id': 699, 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'synset': 'motor.n.01', 'synonyms': ['motor'], 'id': 700, 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'id': 701, 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'id': 702, 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'f', 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'id': 703, 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'id': 704, 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'f', 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'id': 705, 'def': 'a computer input device that controls an on-screen pointer (does not include trackpads / touchpads)', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'id': 706, 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 
'mousepad'}, {'frequency': 'c', 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'id': 707, 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'synset': 'mug.n.04', 'synonyms': ['mug'], 'id': 708, 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'id': 709, 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'id': 710, 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'c', 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'id': 711, 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'id': 712, 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'f', 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'id': 713, 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'id': 714, 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'id': 715, 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'id': 716, 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'c', 'synset': 'needle.n.03', 'synonyms': ['needle'], 'id': 717, 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'synset': 'nest.n.01', 'synonyms': ['nest'], 'id': 718, 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'f', 'synset': 'newspaper.n.01', 'synonyms': ['newspaper', 'paper_(newspaper)'], 'id': 719, 'def': 'a daily or weekly publication on folded sheets containing news, articles, and advertisements', 'name': 'newspaper'}, {'frequency': 'c', 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'id': 720, 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'id': 721, 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'id': 722, 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'c', 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'id': 723, 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'id': 724, 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'synset': 'notepad.n.01', 'synonyms': 
['notepad'], 'id': 725, 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'f', 'synset': 'nut.n.03', 'synonyms': ['nut'], 'id': 726, 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'id': 727, 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'f', 'synset': 'oar.n.01', 'synonyms': ['oar'], 'id': 728, 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'id': 729, 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'id': 730, 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'id': 731, 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'id': 732, 'def': 'oil from olives', 'name': 'olive_oil'}, {'frequency': 'r', 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'id': 733, 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'synset': 'onion.n.01', 'synonyms': ['onion'], 'id': 734, 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'id': 735, 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'id': 736, 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'c', 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'id': 737, 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'f', 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'id': 738, 'def': 'a thick standalone cushion used as a seat or footrest, often next to a chair', 'name': 'ottoman'}, {'frequency': 'f', 'synset': 'oven.n.01', 'synonyms': ['oven'], 'id': 739, 'def': 'kitchen appliance used for baking or roasting', 'name': 'oven'}, {'frequency': 'c', 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'id': 740, 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'synset': 'owl.n.01', 'synonyms': ['owl'], 'id': 741, 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'synset': 'packet.n.03', 'synonyms': ['packet'], 'id': 742, 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'id': 743, 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'synset': 'pad.n.04', 'synonyms': ['pad'], 'id': 744, 'def': 'mostly arm/knee pads labeled', 'name': 'pad'}, {'frequency': 'f', 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'id': 745, 'def': 'a short light oar used without an oarlock to propel a canoe or small 
boat', 'name': 'paddle'}, {'frequency': 'c', 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'id': 746, 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'c', 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'id': 747, 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'synset': 'painting.n.01', 'synonyms': ['painting'], 'id': 748, 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'f', 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'id': 749, 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'id': 750, 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'id': 751, 'def': 'cooking utensil consisting of a wide metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'id': 752, 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'id': 753, 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'id': 754, 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'id': 755, 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'f', 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'id': 756, 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'id': 757, 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'id': 758, 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'id': 759, 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'id': 760, 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'c', 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'id': 761, 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'id': 762, 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'c', 'synset': 'parasol.n.01', 'synonyms': ['parasol', 'sunshade'], 'id': 763, 'def': 'a handheld collapsible source of shade', 'name': 'parasol'}, {'frequency': 'r', 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'id': 764, 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'c', 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'id': 765, 
'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'id': 766, 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'id': 767, 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'id': 768, 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'id': 769, 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'c', 'synset': 'passport.n.02', 'synonyms': ['passport'], 'id': 770, 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home country', 'name': 'passport'}, {'frequency': 'f', 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'id': 771, 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'id': 772, 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'id': 773, 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'synset': 'peach.n.03', 'synonyms': ['peach'], 'id': 774, 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'id': 775, 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'f', 'synset': 'pear.n.01', 'synonyms': ['pear'], 'id': 776, 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'c', 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'id': 777, 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'synset': 'peg.n.04', 'synonyms': ['wooden_leg', 'pegleg'], 'id': 778, 'def': 'a prosthesis that replaces a missing leg', 'name': 'wooden_leg'}, {'frequency': 'r', 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'id': 779, 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'id': 780, 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'synset': 'pen.n.01', 'synonyms': ['pen'], 'id': 781, 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'f', 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'id': 782, 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'id': 783, 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'id': 784, 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'synset': 'pendulum.n.01', 'synonyms': 
['pendulum'], 'id': 785, 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'id': 786, 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'id': 787, 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'id': 788, 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'f', 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'id': 789, 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'id': 790, 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'id': 791, 'def': 'a toiletry that emits and diffuses a fragrant odor', 'name': 'perfume'}, {'frequency': 'r', 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'id': 792, 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'synset': 'person.n.01', 'synonyms': ['person', 'baby', 'child', 'boy', 'girl', 'man', 'woman', 'human'], 'id': 793, 'def': 'a human being', 'name': 'person'}, {'frequency': 'c', 'synset': 'pet.n.01', 'synonyms': ['pet'], 'id': 794, 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'c', 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'id': 795, 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'id': 796, 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'id': 797, 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'f', 'synset': 'piano.n.01', 'synonyms': ['piano'], 'id': 798, 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'id': 799, 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'id': 800, 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'synset': 'pie.n.01', 'synonyms': ['pie'], 'id': 801, 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'id': 802, 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'id': 803, 'def': "a child's coin bank (often shaped like a pig)", 'name': 
'piggy_bank'}, {'frequency': 'f', 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'id': 804, 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'id': 805, 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'id': 806, 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'id': 807, 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'id': 808, 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'id': 809, 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'id': 810, 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'id': 811, 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'id': 812, 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'c', 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'id': 813, 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'id': 814, 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'id': 815, 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'id': 816, 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. 
tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'id': 817, 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'synset': 'plate.n.04', 'synonyms': ['plate'], 'id': 818, 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'synset': 'platter.n.01', 'synonyms': ['platter'], 'id': 819, 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'id': 820, 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'id': 821, 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'id': 822, 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'synset': 'plume.n.02', 'synonyms': ['plume'], 'id': 823, 'def': 'a feather or cluster of feathers worn as an ornament', 'name': 'plume'}, {'frequency': 'r', 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'id': 824, 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'id': 825, 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'id': 826, 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'id': 827, 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'f', 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'id': 828, 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'id': 829, 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'synset': 'pony.n.05', 'synonyms': ['pony'], 'id': 830, 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'id': 831, 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'id': 832, 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'c', 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'id': 833, 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'id': 834, 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'id': 
835, 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'synset': 'pot.n.01', 'synonyms': ['pot'], 'id': 836, 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'id': 837, 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'synset': 'potato.n.01', 'synonyms': ['potato'], 'id': 838, 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'id': 839, 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'id': 840, 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'id': 841, 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'c', 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'id': 842, 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'id': 843, 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'c', 'synset': 'pretzel.n.01', 'synonyms': ['pretzel'], 'id': 844, 'def': 'glazed and salted cracker typically in the shape of a loose knot', 'name': 'pretzel'}, {'frequency': 'f', 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'id': 845, 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'id': 846, 'def': 'a weapon that is forcibly thrown or projected at a target', 'name': 'projectile_(weapon)'}, {'frequency': 'c', 'synset': 'projector.n.02', 'synonyms': ['projector'], 'id': 847, 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'id': 848, 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'synset': 'prune.n.01', 'synonyms': ['prune'], 'id': 849, 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'id': 850, 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'id': 851, 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'id': 852, 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'id': 853, 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'id': 854, 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'id': 855, 'def': 'a tool for 
making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'id': 856, 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'c', 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'id': 857, 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'id': 858, 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'id': 859, 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'id': 860, 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'id': 861, 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'id': 862, 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'id': 863, 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'synset': 'radar.n.01', 'synonyms': ['radar'], 'id': 864, 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'f', 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'id': 865, 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, {'frequency': 'c', 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'id': 866, 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'id': 867, 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'synset': 'raft.n.01', 'synonyms': ['raft'], 'id': 868, 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'id': 869, 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'id': 870, 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'id': 871, 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'id': 872, 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'synset': 'rat.n.01', 'synonyms': ['rat'], 'id': 873, 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'id': 874, 'def': 'a blade that has very sharp edge', 'name': 
'razorblade'}, {'frequency': 'c', 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'id': 875, 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'id': 876, 'def': 'vehicle mirror (side or rearview)', 'name': 'rearview_mirror'}, {'frequency': 'c', 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'id': 877, 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'id': 878, 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'c', 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'id': 879, 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'f', 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'id': 880, 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'id': 881, 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'id': 882, 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'id': 883, 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'c', 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'id': 884, 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'synset': 'ring.n.08', 'synonyms': ['ring'], 'id': 885, 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'id': 886, 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'id': 887, 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'synset': 'robe.n.01', 'synonyms': ['robe'], 'id': 888, 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'id': 889, 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'synset': 'rodent.n.01', 'synonyms': ['rodent'], 'id': 890, 'def': 'relatively small placental mammals having a single pair of constantly growing incisor teeth specialized for gnawing', 'name': 'rodent'}, {'frequency': 'r', 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'id': 891, 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'id': 892, 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'id': 893, 'def': 
'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'id': 894, 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'id': 895, 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'id': 896, 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'id': 897, 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'id': 898, 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'id': 899, 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'id': 900, 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'id': 901, 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'id': 902, 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'f', 'synset': 'sail.n.01', 'synonyms': ['sail'], 'id': 903, 'def': 'a large piece of fabric by means of which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'f', 'synset': 'salad.n.01', 'synonyms': ['salad'], 'id': 904, 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'id': 905, 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'c', 'synset': 'salami.n.01', 'synonyms': ['salami'], 'id': 906, 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'c', 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'id': 907, 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'id': 908, 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'c', 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'id': 909, 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'id': 910, 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'id': 911, 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, 
{'frequency': 'f', 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'id': 912, 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'id': 913, 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'id': 914, 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'id': 915, 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'id': 916, 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'id': 917, 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'id': 918, 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'id': 919, 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'id': 920, 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'id': 921, 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'id': 922, 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, {'frequency': 'f', 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'id': 923, 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'f', 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'id': 924, 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'r', 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'id': 925, 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'c', 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'id': 926, 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'f', 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'id': 927, 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'id': 928, 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'c', 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'id': 929, 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'c', 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'id': 930, 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'id': 931, 'def': 
'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'id': 932, 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'c', 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'id': 933, 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'c', 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'id': 934, 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'id': 935, 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'c', 'synset': 'shark.n.01', 'synonyms': ['shark'], 'id': 936, 'def': 'typically large carnivorous fishes with sharp teeth', 'name': 'shark'}, {'frequency': 'r', 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'id': 937, 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'id': 938, 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'synset': 'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'id': 939, 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'id': 940, 'def': 'toiletry that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'id': 941, 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'synset': 'shears.n.01', 'synonyms': ['shears'], 'id': 942, 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'id': 943, 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'id': 944, 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'id': 945, 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'c', 'synset': 'shield.n.02', 'synonyms': ['shield'], 'id': 946, 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'id': 947, 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'id': 948, 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'f', 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'id': 949, 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'id': 950, 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'synset': 'short_pants.n.01', 'synonyms': 
['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'id': 951, 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'id': 952, 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'f', 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'id': 953, 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'id': 954, 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'id': 955, 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'r', 'synset': 'shower_cap.n.01', 'synonyms': ['shower_cap'], 'id': 956, 'def': 'a tight cap worn to keep hair dry while showering', 'name': 'shower_cap'}, {'frequency': 'f', 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'id': 957, 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'id': 958, 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'f', 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'id': 959, 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'synset': 'silo.n.01', 'synonyms': ['silo'], 'id': 960, 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'synset': 'sink.n.01', 'synonyms': ['sink'], 'id': 961, 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'id': 962, 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'id': 963, 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'synset': 'ski.n.01', 'synonyms': ['ski'], 'id': 964, 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'id': 965, 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'id': 966, 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'id': 967, 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'id': 968, 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'r', 'synset': 'skullcap.n.01', 'synonyms': ['skullcap'], 'id': 969, 'def': 'rounded brimless cap fitting the crown of the head', 'name': 'skullcap'}, {'frequency': 'c', 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'id': 970, 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'synset': 'sleeping_bag.n.01', 'synonyms': 
['sleeping_bag'], 'id': 971, 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'id': 972, 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'id': 973, 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'id': 974, 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'id': 975, 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'id': 976, 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'id': 977, 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'id': 978, 'def': 'tracked vehicle for travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'synset': 'soap.n.01', 'synonyms': ['soap'], 'id': 979, 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'id': 980, 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'synset': 'sock.n.01', 'synonyms': ['sock'], 'id': 981, 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'f', 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'id': 982, 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'synset': 'softball.n.01', 'synonyms': ['softball'], 'id': 983, 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'id': 984, 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'id': 985, 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'f', 'synset': 'soup.n.01', 'synonyms': ['soup'], 'id': 986, 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'id': 987, 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'id': 988, 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'id': 989, 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 
'soybean_milk', 'soymilk'], 'id': 990, 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'id': 991, 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'id': 992, 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'id': 993, 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'id': 994, 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 'eyeglasses', 'glasses'], 'id': 995, 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'id': 996, 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'c', 'synset': 'spider.n.01', 'synonyms': ['spider'], 'id': 997, 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'r', 'synset': 'spiny_lobster.n.02', 'synonyms': ['crawfish', 'crayfish'], 'id': 998, 'def': 'large edible marine crustacean having a spiny carapace but lacking the large pincers of true lobsters', 'name': 'crawfish'}, {'frequency': 'c', 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'id': 999, 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'id': 1000, 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'id': 1001, 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'id': 1002, 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'synset': 'squid.n.01', 'synonyms': ['squid_(food)', 'calamari', 'calamary'], 'id': 1003, 'def': '(Italian cuisine) squid prepared as food', 'name': 'squid_(food)'}, {'frequency': 'c', 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'id': 1004, 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'r', 'synset': 'stagecoach.n.01', 'synonyms': ['stagecoach'], 'id': 1005, 'def': 'a large coach-and-four formerly used to carry passengers and mail on regular routes between towns', 'name': 'stagecoach'}, {'frequency': 'c', 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'id': 1006, 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'c', 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'id': 1007, 'def': 'echinoderms characterized 
by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'id': 1008, 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'id': 1009, 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'id': 1010, 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'f', 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'id': 1011, 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'id': 1012, 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'id': 1013, 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'id': 1014, 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'synset': 'stew.n.02', 'synonyms': ['stew'], 'id': 1015, 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'id': 1016, 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'id': 1017, 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'f', 'synset': 'stool.n.01', 'synonyms': ['stool'], 'id': 1018, 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'id': 1019, 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'id': 1020, 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'id': 1021, 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'id': 1022, 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'synset': 'strap.n.01', 'synonyms': ['strap'], 'id': 1023, 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'id': 1024, 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'id': 1025, 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'id': 1026, 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'id': 1027, 'def': 'a lamp 
supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'id': 1028, 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'id': 1029, 'def': 'a pointed tool for writing or drawing or engraving, including pens', 'name': 'stylus'}, {'frequency': 'r', 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'id': 1030, 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'id': 1031, 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'id': 1032, 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'f', 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'id': 1033, 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'id': 1034, 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'id': 1035, 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'id': 1036, 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'f', 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'id': 1037, 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'id': 1038, 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'synset': 'swab.n.02', 'synonyms': ['mop'], 'id': 1039, 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'id': 1040, 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'id': 1041, 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'id': 1042, 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'id': 1043, 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'id': 1044, 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'id': 1045, 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 
'c', 'synset': 'sword.n.01', 'synonyms': ['sword'], 'id': 1046, 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'id': 1047, 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'id': 1048, 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'id': 1049, 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'synset': 'table.n.02', 'synonyms': ['table'], 'id': 1050, 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'id': 1051, 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'id': 1052, 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'id': 1053, 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'synset': 'taco.n.02', 'synonyms': ['taco'], 'id': 1054, 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'synset': 'tag.n.02', 'synonyms': ['tag'], 'id': 1055, 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'id': 1056, 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'id': 1057, 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'id': 1058, 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'f', 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'id': 1059, 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'id': 1060, 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'f', 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'id': 1061, 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'id': 1062, 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'id': 1063, 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 
'id': 1064, 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'id': 1065, 'def': 'a cloth having a crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'id': 1066, 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'c', 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'id': 1067, 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'id': 1068, 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'id': 1069, 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'f', 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'id': 1070, 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'id': 1071, 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'id': 1072, 'def': 'electronic device for communicating by voice over long distances (includes wired and wireless/cell phones)', 'name': 'telephone'}, {'frequency': 'c', 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'id': 1073, 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'id': 1074, 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'id': 1075, 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'id': 1076, 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'id': 1077, 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'id': 1078, 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'id': 1079, 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'id': 1080, 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'id': 1081, 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'id': 1082, 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'f', 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'id': 1083, 'def': 'a regulator for automatically regulating 
temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'id': 1084, 'def': 'a small metal cap to protect the finger while sewing; can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'id': 1085, 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'id': 1086, 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'id': 1087, 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'id': 1088, 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'id': 1089, 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'id': 1090, 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'id': 1091, 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'c', 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'id': 1092, 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'id': 1093, 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'id': 1094, 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'id': 1095, 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'f', 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'id': 1096, 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'id': 1097, 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'id': 1098, 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'id': 1099, 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'f', 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'id': 1100, 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'id': 1101, 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'id': 1102, 'def': 'small brush; has 
long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'id': 1103, 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, {'frequency': 'f', 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'id': 1104, 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'f', 'synset': 'top.n.09', 'synonyms': ['cover'], 'id': 1105, 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'id': 1106, 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'id': 1107, 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'synset': 'towel.n.01', 'synonyms': ['towel'], 'id': 1108, 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'id': 1109, 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'synset': 'toy.n.03', 'synonyms': ['toy'], 'id': 1110, 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'id': 1111, 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'id': 1112, 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'c', 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'id': 1113, 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'f', 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'id': 1114, 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'id': 1115, 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'id': 1116, 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'synset': 'tray.n.01', 'synonyms': ['tray'], 'id': 1117, 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'id': 1118, 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'id': 1119, 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'c', 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'id': 
1120, 'def': 'a vehicle with three wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'f', 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'id': 1121, 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'id': 1122, 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'synset': 'truck.n.01', 'synonyms': ['truck'], 'id': 1123, 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'id': 1124, 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'id': 1125, 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'synset': 'tub.n.02', 'synonyms': ['vat'], 'id': 1126, 'def': 'a large vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'synset': 'turban.n.01', 'synonyms': ['turban'], 'id': 1127, 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'c', 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'id': 1128, 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'id': 1129, 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'id': 1130, 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'c', 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'id': 1131, 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'c', 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'id': 1132, 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'id': 1133, 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'f', 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'id': 1134, 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'id': 1135, 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'f', 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'id': 1136, 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'c', 'synset': 'urn.n.01', 'synonyms': ['urn'], 'id': 1137, 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'id': 1138, 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'f', 'synset': 'vase.n.01', 'synonyms': ['vase'], 'id': 1139, 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'synset': 
'vending_machine.n.01', 'synonyms': ['vending_machine'], 'id': 1140, 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'id': 1141, 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'f', 'synset': 'vest.n.01', 'synonyms': ['vest', 'waistcoat'], 'id': 1142, 'def': "a man's sleeveless garment worn underneath a coat", 'name': 'vest'}, {'frequency': 'c', 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'id': 1143, 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'id': 1144, 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'id': 1145, 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'id': 1146, 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'c', 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'id': 1147, 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'id': 1148, 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'id': 1149, 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'id': 1150, 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'id': 1151, 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'id': 1152, 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'id': 1153, 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'id': 1154, 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'id': 1155, 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'f', 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'id': 1156, 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'id': 1157, 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'id': 1158, 'def': 'a tall piece of furniture that provides storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'synset': 'washbasin.n.01', 'synonyms': ['washbasin', 
'basin_(for_washing)', 'washbowl', 'washstand', 'handbasin'], 'id': 1159, 'def': 'a bathroom sink that is permanently installed and connected to a water supply and drainpipe; where you can wash your hands and face', 'name': 'washbasin'}, {'frequency': 'c', 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'id': 1160, 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'id': 1161, 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'id': 1162, 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'id': 1163, 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'id': 1164, 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'id': 1165, 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'c', 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'id': 1166, 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'id': 1167, 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'id': 1168, 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'id': 1169, 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'id': 1170, 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'id': 1171, 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'f', 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'id': 1172, 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'id': 1173, 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'id': 1174, 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'id': 1175, 'def': 'a rich cake with two or more tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'id': 1176, 'def': 'a ring given to the bride and/or groom at 
the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'id': 1177, 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'id': 1178, 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'id': 1179, 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'id': 1180, 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'c', 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'id': 1181, 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'c', 'synset': 'wig.n.01', 'synonyms': ['wig'], 'id': 1182, 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'id': 1183, 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'id': 1184, 'def': 'A mill or turbine that is powered by wind', 'name': 'windmill'}, {'frequency': 'c', 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'id': 1185, 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'id': 1186, 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'id': 1187, 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'id': 1188, 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'c', 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'id': 1189, 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'id': 1190, 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'f', 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 'id': 1191, 'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'synset': 'wok.n.01', 'synonyms': ['wok'], 'id': 1192, 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'id': 1193, 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'id': 1194, 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'id': 1195, 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 
'name': 'wreath'}, {'frequency': 'c', 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'id': 1196, 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'f', 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'id': 1197, 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'id': 1198, 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'c', 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'id': 1199, 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'c', 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'id': 1200, 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'c', 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'id': 1201, 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'id': 1202, 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'id': 1203, 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa -# fmt: on diff --git a/spaces/AzinZ/vitscn/mel_processing.py b/spaces/AzinZ/vitscn/mel_processing.py deleted file mode 100644 index 8e7c415c9194d0db98736daad74a86d8943bc31a..0000000000000000000000000000000000000000 --- a/spaces/AzinZ/vitscn/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', 
normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/Benson/text-generation/Examples/50 30 Yoruba Pelcula Descargar.md b/spaces/Benson/text-generation/Examples/50 30 Yoruba Pelcula Descargar.md deleted file mode 100644 index df4cfe4973a51da663a06b8b76a5b0bc10a61593..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/50 30 Yoruba Pelcula Descargar.md +++ /dev/null @@ -1,51 +0,0 @@ -
-

50/30 Yoruba Movie Download: A Comedy of Errors

-

If you are looking for a hilarious and entertaining Yoruba movie to watch, you may want to check out 50/30. It is a comedy that follows the adventures of three friends who are desperate to make money. In this article, we will tell you everything you need to know about downloading the Yoruba movie 50/30, including the plot, the cast, the reviews, and how to get it online. We will also discuss the benefits of watching Yoruba movies in general. So sit back and enjoy!

-

The Plot of the 50/30 Yoruba Movie

-

50/30 is a comedy film released in 2021. It tells the story of three close friends, Sanyeri, Adekola Tijani, and No Network, who are always looking for ways to make quick money. They decide to seek the help of a spiritualist, who makes a strange request of them: they have to find a woman who is 50 years old but looks 30. The friends embark on a hilarious journey to find such a woman, but they run into many challenges and surprises along the way. Will they complete their mission and get rich? You have to watch the movie to find out!

-

50 30 yoruba movie download


Download File ··· https://bltlly.com/2v6JDc



-

The Cast and Crew of the 50/30 Yoruba Movie

-

50/30 features some of the most popular and talented actors and actresses in the Yoruba film industry. Here are some of them:

- -

The movie was directed by Abiodun Jimoh, who is also a producer and writer. He has directed films such as Ewure Abami, Alagbara Meji, and Omo Ekun.

-

The Reception and Reviews of the 50/30 Yoruba Movie

-

50/30 has received positive reviews from viewers and critics alike. The movie has been praised for its funny and engaging plot, witty dialogue, colorful characters, and quality production. It has also been nominated for several awards, such as Best Comedy Film, Best Actor in a Comedy Role, Best Director, and Best Screenplay at the Yoruba Movie Academy Awards (YMAA). The movie has also gathered over 600,000 views on YouTube, which shows its popularity among viewers.

How to Download the 50/30 Yoruba Movie Online

-

If you are interested in watching the 50/30 Yoruba movie, you may be wondering how to download it online. There are several ways to do so, but you need to be careful about the source and quality of the download. Here are some tips on how to download the 50/30 Yoruba movie online:

-
    -
  1. Use a legal and reputable website that offers Yoruba movies for download. Some examples are [YorubaPlay], [Yorubahood], and [OkikiTV]. These websites have a large collection of Yoruba movies, including 50/30, which can be downloaded for a small fee or for free. They also have high-quality videos and subtitles for your convenience.
  2. -
  3. Use reliable and safe downloader software or apps that can help you download Yoruba movies from YouTube or other platforms. Some examples are [VidMate], [TubeMate], and [SnapTube]. These programs or apps let you download Yoruba movies in various formats and resolutions, such as MP4, 3GP, HD, and 4K. They also have features that let you pause and resume downloads, manage your downloads, and share them with others.
  4. - -
-

However, before downloading any Yoruba movie online, make sure you have a good internet connection, enough storage space, and a compatible device. You should also be aware of the risks of downloading from illegal or unverified sources, such as malware, viruses, spyware, and copyright infringement. Always respect the rights of the movies' creators and producers and support them by paying for their work.
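As a purely illustrative precaution (not something the sites above require), the following Python sketch computes the SHA-256 checksum of a downloaded file so you can compare it with a checksum published by the source; the file name and expected value are placeholders.

```python
# Minimal sketch: verify a downloaded file by comparing its SHA-256 digest
# with the checksum published by the source. The file name and expected
# value below are placeholders, not real data from the article.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in 1 MiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    downloaded = "50-30-yoruba-movie.mp4"                 # hypothetical local file
    published = "<checksum listed by the download site>"  # placeholder value
    actual = sha256_of(downloaded)
    print("SHA-256:", actual)
    print("Matches published checksum:", actual == published)
```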

-

The Benefits of Watching Yoruba Movies

-

Watching Yoruba movies is not only fun and entertaining but also beneficial in many ways. Here are some of the benefits of watching Yoruba movies:

- -

Conclusion

- -

Frequently Asked Questions

-

Here are some frequently asked questions about the 50/30 Yoruba movie download:

-
    -
  1. Q: Where can I watch the 50/30 Yoruba movie online without downloading it?
    -A: You can watch the 50/30 Yoruba movie online on streaming platforms such as [Netflix], [iRokoTV], and [YouTube]. However, you may have to pay a subscription fee or watch some ads to access the movie.
  2. -
  3. Q: What does 50/30 mean in the movie's title?
    -A: 50/30 refers to the age and appearance of the woman the three friends are looking for. They need to find a woman who is 50 years old but looks like she is 30.
  4. -
  5. Q: How long is the 50/30 Yoruba movie?
    -A: The 50/30 Yoruba movie runs for about 90 minutes.
  6. -
  7. Q: Is the 50/30 Yoruba movie suitable for children?
    -A: The 50/30 Yoruba movie is rated PG-13, which means some scenes or language may not be appropriate for children under 13. Parental guidance is recommended.
  8. -
  9. Q: What are some other Yoruba comedy movies I can watch?
    -A: Some other Yoruba comedy movies you can watch are [Omo Ghetto The Saga], [Alakada Reloaded], [Jenifa's Diary], [Merry Men], and [Chief Daddy].
  10. -

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Charger Play Store.md b/spaces/Benson/text-generation/Examples/Charger Play Store.md deleted file mode 100644 index bf5f2766dc2b360f3748efed74ade1277946a5be..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Charger Play Store.md +++ /dev/null @@ -1,126 +0,0 @@ - -
| Heading | Subpoints |
| --- | --- |
| … | Advantages and disadvantages of the Play Store; Examples of content available on the Play Store |
| H2: How to download and install the Play Store on your Android device? | Check device compatibility; Enable unknown sources in settings; Download Play Store APK file from a trusted site; Install APK file and launch Play Store |
| H2: How to update the Play Store and its apps? | Enable automatic updates in Play Store settings; Manually check for available updates; Download and install updates |
| H2: How to use the Play Store to search, download and manage its applications? | Use the search bar or categories to find applications; View application descriptions and user reviews; Download and install applications of your choice; Access its installed applications and uninstall them if necessary |
| H2: What are the alternatives to the Play Store to download Android applications? | Present the main criteria for choosing an alternative to the Play Store; Compare some popular alternatives to the Play Store (APKMirror, Aurora Store, Aptoide, etc.); Give safety tips and caution for using alternative sources |
| H1: Conclusion: summarize the key points of the article and give your personal opinion on the Play Store | Remember what the Play Store is and how to use it; Highlight the advantages and limitations of the Play Store; Invite readers to share their experience and questions about the Play Store |

Table 2: Article with HTML formatting

Download Play Store: how to access the Google app store on Android

- -

What is the Play Store and why use it?

-

The Play Store is an app that gives you access to a catalogue of over 2.5 million apps for your Android device. This is Google’s official store, which guarantees you secure downloads, regular updates, and optimal compatibility with your device. The Play Store also offers you the possibility to buy or rent films, series, books, magazines, or even music.

-

One of the main advantages of the Play Store is its ease of use. You can easily search for applications by name, category, or keywords. You can view detailed descriptions of applications, as well as notes and comments from other users. You can also enjoy personalized recommendations based on your tastes and habits. Finally, you can manage your installed applications from a single interface.

-

download play store


DOWNLOAD ->>> https://bltlly.com/2v6L6p



-

However, the Play Store also has some drawbacks. First, it requires a Google account to function, which can cause privacy or security issues for some users. Then, it imposes certain rules on app developers, which can limit the diversity and creativity of the content offered.

Finally, the Play Store may not be compatible with every Android device, especially older or less common models, which can prevent access to certain applications.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Category | Examples |
| --- | --- |
| Games | Candy Crush Saga, Among Us, PUBG Mobile, etc. |
| Social Networks | Facebook, Instagram, WhatsApp, etc. |
| Utilities | Google Maps, Gmail, Waze, etc. |
| Photo Editors | PicsArt, Snapseed, Adobe Photoshop Express, etc. |
| Music Players | Spotify, Deezer, YouTube Music, etc. |
| Films | Avengers: Endgame, Joker, Parasite, etc. |
| Series | Stranger Things, The Witcher, The Mandalorian, etc. |
| Books | Harry Potter, Lord of the Rings, Plague, etc. |
| Music | Ed Sheeran, Billie Eilish, BTS, etc. |
-

How do I download and install the Play Store on your Android device?

-

If you have a recent and official Android device, chances are the Play Store is already installed by default. You can check this by searching the Play Store icon on your home screen or in your app drawer. If you find it, just tap it to launch the Play Store and log in with your Google account.

-

If you don’t have the Play Store on your device, or want to reinstall it for any reason, you can download and install it manually. Here are the steps:

1. Check that your device is compatible with the Play Store.
2. Enable installation from unknown sources in your device settings.
3. Download the Play Store APK file from a trusted site.
4. Install the APK file, launch the Play Store, and sign in with your Google account (a minimal command-line sketch of this step follows below).
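Purely as an illustration of step 4, here is a minimal Python sketch that sideloads a previously downloaded APK over USB with adb. It assumes the Android platform tools are installed and USB debugging is enabled on the device; the file name is a placeholder, and installing the APK from the device's file manager works just as well.

```python
# Minimal sketch: sideload a previously downloaded APK with adb.
# Assumes the Android platform tools are installed and USB debugging is on.
import subprocess

APK_PATH = "play-store.apk"  # hypothetical file downloaded in step 3

def install_apk(path: str) -> None:
    """Run `adb install -r <apk>`; -r keeps existing app data on reinstall."""
    result = subprocess.run(
        ["adb", "install", "-r", path],
        capture_output=True,
        text=True,
        check=False,
    )
    print(result.stdout or result.stderr)

if __name__ == "__main__":
    install_apk(APK_PATH)
```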

How do I update the Play Store and its applications?

-

To take full advantage of the Play Store and its applications, it is important to keep them up to date. Updates give you access to new features, bug fixes, and performance improvements. Here is how to update the Play Store and its applications:

1. Enable automatic updates in the Play Store settings.
2. Manually check for available updates when needed.
3. Download and install the updates.

How do I use the Play Store to search for, download and manage your applications?

-

The Play Store is a very easy-to-use app that lets you find, download, and manage your apps with just a few clicks. Here's how:

1. Use the search bar or categories to find applications.
2. View application descriptions and user reviews.
3. Download and install the applications of your choice.
4. Access your installed applications and uninstall them if necessary.

What are the alternatives to the Play Store to download Android apps?

-

The Play Store is not the only source of applications for your Android device. There are other ways to download apps, which may have some advantages over the Play Store. However, you should also be careful and vigilant when using alternative sources, as they may pose risks to your device or personal data. Here are some criteria to consider when choosing an alternative to the Play Store:

- -

Here are some examples of popular alternatives to the Play Store:

-

- - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | Description | Benefits | Disadvantages |
| --- | --- | --- | --- |
| APKMirror | A website that offers free APK files of Android apps. | A wide range of applications; updated versions; applications not available on the Play Store | A security risk if the APK file is modified or infected; manual installation required |
| Aurora Store | An app that allows you to access the Play Store catalogue without needing a Google account. | Simple and intuitive interface; compatibility with most Android devices; ability to download regional or restricted applications | A risk of violation of Google's terms of use; reliance on the Play Store for updates |
| Aptoide | An app that allows you to create and manage your own Android app store. | Complete freedom for developers and users; an active and participatory community; opportunity to create thematic or custom shops | A risk of pirated or infringing applications; variable quality of the proposed applications |
-
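The comparison above singles out modified or infected APK files as the main risk of going outside the Play Store. Purely as an illustration, and assuming the Android SDK build-tools (which ship the apksigner utility) are installed and on your PATH, the following Python sketch asks apksigner to verify a downloaded APK and print its signing certificates; the file name is a placeholder.

```python
# Minimal sketch: check who signed a downloaded APK before installing it.
# Assumes the Android SDK build-tools (which include apksigner) are on PATH.
import subprocess

APK_PATH = "example-downloaded.apk"  # hypothetical file from an alternative store

def print_apk_signers(path: str) -> None:
    """Verify the APK signature and print its signing certificates."""
    result = subprocess.run(
        ["apksigner", "verify", "--print-certs", path],
        capture_output=True,
        text=True,
        check=False,
    )
    # A failed verification, or a signer that does not match the official
    # developer, is a strong hint that the file was modified.
    print(result.stdout or result.stderr)

if __name__ == "__main__":
    print_apk_signers(APK_PATH)
```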

Conclusion: summarize the key points of the article and give your personal opinion on the Play Store

-

In this article, we covered what the Play Store is, why you should use it, how to download and install it on your Android device, how to update and manage its applications, and what alternatives to the Play Store are available. We hope this article has been helpful and that you have learned some interesting things about the Play Store.

- -

However, the Play Store is not perfect. It can raise privacy concerns, limit the diversity of available content, or be incompatible with certain devices. It may also fail to meet your specific needs or your appetite for discovering new apps. That is why there are alternatives to the Play Store, which can offer you more freedom, personalization, or creativity. But be careful: these alternatives can also put your device or your personal data at risk. Exercise caution and vigilance when using alternative sources.

-

I use the Play Store as my main application source for my Android device. I find it reliable, easy to use, and good at surfacing applications suited to my tastes. I regularly update the Play Store and my apps to take advantage of new features and bug fixes. I also check other users' reviews to get an idea of an app's quality before downloading it.

-

But I also don’t hesitate to use alternatives to the Play Store when I want to discover applications that are not available on the Play Store, or that are more suitable for my device. For example, I use APKMirror to download the latest versions of the apps I like, or Aurora Store to access regional or restricted apps. I always make sure to check the security and compatibility of the applications I download from these alternative sources.

-

What is your experience with the Play Store? What are your favorite apps on the Play Store? What alternatives to the Play Store do you use? Feel free to share your thoughts and questions in the comments below!

-

FAQs

-

Here are some frequently asked questions about the Play Store and its alternatives:

- -
  • How do I create a Google account to use the Play Store?
    To create a Google account, you can either use an existing Gmail address or create a new Gmail address. You can then sign in with your Google account on the Play Store from your Android device.
  • -
  • How do I turn off automatic updates from the Play Store?
    To disable automatic updates to the Play Store, you can go to the Play Store settings under the heading "Automatically update applications" and choose the option "Do not automatically update applications". You will then need to manually check and install the available updates.
  • -
  • How do I remove the Play Store from my Android device?
    To remove the Play Store from your Android device, you must have root access on your device, that is, access to system files. You can then use an application like Titanium Backup or Root Uninstaller to uninstall the Play Store. Please note that this operation may cause malfunctions or data loss on your device.
  • -
  • How do I download free paid apps from the Play Store?
    There is no legal and safe way to download free paid apps from the Play Store. If you find sites or apps that offer to do so, it is probably scams or hacks, which can harm your device or your personal data. We therefore advise you to respect copyright and pay for the applications you wish to use.
  • - -

    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Cmo Descargar Pokerstars En Pases Prohibidos.md b/spaces/Benson/text-generation/Examples/Cmo Descargar Pokerstars En Pases Prohibidos.md deleted file mode 100644 index 34af443003d9890277b0b66de255afcd6feb772f..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cmo Descargar Pokerstars En Pases Prohibidos.md +++ /dev/null @@ -1,62 +0,0 @@ -
    -

How to Download PokerStars in Banned Countries

    -

If you are a fan of online poker, you have probably heard of PokerStars, the largest and most popular online poker site in the world. PokerStars offers a wide range of games, tournaments, promotions, and features that cater to players of all skill levels and preferences. However, not everyone can enjoy the benefits of PokerStars, as the site is blocked or restricted in many countries due to legal or regulatory issues. In this article, we will show you how to download PokerStars in banned countries using a VPN, a simple and effective solution that lets you bypass geographic restrictions and play online poker from anywhere.

    -

What Is PokerStars and Why Is It Banned in Some Countries?

    -

PokerStars Features and Benefits

    -

PokerStars is the world's leading online poker platform, with more than 100 million registered users and millions of active players every day. PokerStars offers a variety of games, including Texas Hold'em, Omaha, Stud, Razz, Draw, mixed games, and more. You can also find tournaments of all sizes and stakes, from freerolls and micro stakes to high rollers and major events such as the World Championship of Online Poker (WCOOP) and the Spring Championship of Online Poker (SCOOP). PokerStars also has a loyalty program called PokerStars Rewards, which offers you personalized rewards based on your play. You can earn reward points every time you play for real money or place bets, and exchange them for cash prizes, tournament tickets, merchandise, or StarsCoin, which you can spend in the Rewards Store.

    -

how to download pokerstars in banned countries


    Download ✑ ✑ ✑ https://bltlly.com/2v6Mz7



    -

PokerStars Prohibited Jurisdictions

    -

Despite its popularity and reputation, PokerStars is not available in every country. There are several reasons why PokerStars may be banned or restricted in certain jurisdictions, such as:

    - -

According to the official website, PokerStars has three categories of prohibited jurisdictions:

| Category | Description | Examples |
| --- | --- | --- |
| Blocked from real-money activity | Countries where players can access play-money games but not real-money games | Australia, Colombia, Egypt, UAE, USA (except where locally licensed) |
| Blocked from real-money and freemium activity | Countries where players cannot access real-money or freemium games (such as Jackpot Poker) | Afghanistan, Iran, Iraq, North Korea, Saudi Arabia, Turkey |
| Blocked from real-money, freemium, and play-money activity | Countries where players cannot access any games at all | Cuba, Hong Kong, Israel, Libya, Sudan, Syria |

The list of prohibited jurisdictions

How to Use a VPN to Access PokerStars from Anywhere

    -

What Is a VPN and How Does It Work?

    -

A VPN, or virtual private network, is a service that creates a secure, encrypted connection between your device and a remote server. By using a VPN, you can change your IP address and location, making it look as if you are browsing from another country. This way, you can access websites and services that are blocked or restricted in your region, such as PokerStars.
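As a purely illustrative aid, the following Python sketch prints the public IP address that remote servers currently see for your machine; run it once before and once after connecting to the VPN, and a changed address suggests your traffic now goes through the VPN server. api.ipify.org is one common IP-echo endpoint and can be swapped for any equivalent service.

```python
# Minimal sketch: print the public IP address remote servers see for this
# machine. Run once before and once after connecting to the VPN; a changed
# address suggests traffic now leaves through the VPN server.
import urllib.request

def public_ip() -> str:
    with urllib.request.urlopen("https://api.ipify.org", timeout=10) as resp:
        return resp.read().decode("utf-8").strip()

if __name__ == "__main__":
    print("Public IP as seen by remote servers:", public_ip())
```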

    - -

The Best VPNs for Online Poker in 2023

    -

Not every VPN is suitable for online poker, as some may have slow speeds, unreliable connections, poor security features, or limited server locations. To play PokerStars without any problems, you need a VPN that meets the following criteria:

    - -

Based on these criteria, we have tested and selected the best VPNs for online poker in 2023. Here are our top recommendations:

    -
      -
    1. NordVPN: Our top VPN for online poker. NordVPN has over 5,500 servers in 60 countries, including many where PokerStars is available. It offers blazing-fast speeds and reliable connections for uninterrupted play. It also has excellent security features, such as 256-bit AES encryption, a kill switch, DNS leak protection, and a strict no-logs policy. NordVPN also offers a dedicated IP address option for an extra $70/year, which gives you a unique IP address that is not shared with other users. You can use NordVPN on up to 6 devices simultaneously with one account. NordVPN also has a 30-day money-back guarantee, so you can try it risk-free.
    2. - -
    3. PrivateVPN: PrivateVPN is a budget-friendly VPN for online poker. It has over 200 servers in 63 countries, including many where PokerStars is unblocked. It offers fast and consistent speeds for smooth play. It also has solid security features, such as 256-bit AES encryption, a kill switch, DNS leak protection, and a no-logs policy. PrivateVPN also offers dynamic dedicated public IPs for free with its standard plan. These are IPs that are assigned exclusively to you when you connect to certain servers (such as the US server in Buffalo, New York). You can use PrivateVPN on up to 10 devices simultaneously with one account. PrivateVPN also has a 30-day money-back guarantee. Using a dedicated IP address reduces the possibility of being flagged as a VPN user by PokerStars; you can get one from some of the VPNs we recommend above, such as NordVPN or PIA.
    4. -
    5. Use a reputable VPN service: A reputable VPN service is one with fast and consistent speeds, reliable connections, strong security features, and a no-logs policy. These factors can help you avoid interruptions, leaks, or traces of your VPN use while playing online poker. You should avoid free or low-quality VPNs, as they can compromise your online privacy and security, or expose you to detection and bans from PokerStars.
    6. -
    7. Use the same country or region: If you have an existing PokerStars account, you should use a VPN server that matches the country or region of your account. This can help you avoid suspicion or inconsistencies when playing online poker. For example, if your PokerStars account is registered in the UK, you should use a UK VPN server to access PokerStars. If you want to create a new PokerStars account, you should use a VPN server that corresponds to the country or region where you want to play.
    8. - - -

How Can I Deposit and Withdraw Money from PokerStars While Using a VPN?

      -

If you use a VPN to access PokerStars from a banned country, you may face some challenges when it comes to depositing and withdrawing money from your account. This is because some payment methods may not be available or compatible with your VPN location, or they may require verification or identification that could reveal your true location. To avoid these problems, you should follow these tips:

      -
        -
      • Use an e-wallet service: An e-wallet service is an online payment platform that lets you store and transfer money online. Some examples of e-wallet services are PayPal, Skrill, Neteller, and ecoPayz. These services are widely accepted by PokerStars and other online gambling sites, and they can help you avoid bank or credit card fees, restrictions, or verification. You can also use an e-wallet service to convert your currency into the one used by PokerStars, such as USD or EUR.
      • -
      • Use a cryptocurrency: A cryptocurrency is a digital currency that operates independently of any central authority or intermediary. Some examples of cryptocurrencies are Bitcoin, Ethereum, Litecoin, and Dogecoin. These currencies are secure, anonymous, and decentralized, and they can help you avoid censorship or regulation by your government or ISP. You can also use a cryptocurrency to avoid currency conversion fees or fluctuations. However, keep in mind that cryptocurrencies are volatile and risky, and not all online gambling sites accept them.
      • - -
      -

By following these tips, you can deposit and withdraw money from PokerStars while using a VPN more easily and safely. However, you should still review the terms and conditions of PokerStars and of the payment methods you use, and make sure you comply with local laws and regulations regarding online gambling and money transfers.

      -

      -

What Are the Risks of Playing Online Poker with a VPN?

      -

Playing online poker with a VPN can be a great way to access PokerStars and other online gambling sites from anywhere, but it also comes with some risks and challenges you should be aware of. Some of them are:

      -
        -
      • Legal or regulatory issues: Online gambling is illegal or restricted in many countries, and using a VPN to access PokerStars or other online gambling sites may violate local laws and regulations. You could face legal consequences or penalties if you are caught or reported by your government, ISP, or PokerStars. You should always check the legal status of online gambling in your country before playing, and use a VPN at your own risk.
      • -
      • Account suspension or closure: PokerStars does not explicitly ban VPN use, but it reserves the right to close or suspend your account if it detects any suspicious or fraudulent activity. This means that if you use a VPN to access PokerStars from a banned country, you risk losing your account or funds. You should always follow the tips we provided above to avoid detection and bans from PokerStars, and use a VPN at your own risk.
      • - -
      -

By being aware of these risks and challenges, you can play online poker with a VPN more safely and responsibly. However, you should understand that there is no guarantee you will not run into problems when using a VPN to access PokerStars from a banned country, and that you are responsible for any consequences that may arise from using a VPN to play online poker.

      \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/appdirs.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/appdirs.py deleted file mode 100644 index 16933bf8afedcbe3e9d4fcc04e5f7246228c56fc..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/appdirs.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -This code wraps the vendored appdirs module to so the return values are -compatible for the current pip code base. - -The intention is to rewrite current usages gradually, keeping the tests pass, -and eventually drop this after all usages are changed. -""" - -import os -import sys -from typing import List - -from pip._vendor import platformdirs as _appdirs - - -def user_cache_dir(appname: str) -> str: - return _appdirs.user_cache_dir(appname, appauthor=False) - - -def _macos_user_config_dir(appname: str, roaming: bool = True) -> str: - # Use ~/Application Support/pip, if the directory exists. - path = _appdirs.user_data_dir(appname, appauthor=False, roaming=roaming) - if os.path.isdir(path): - return path - - # Use a Linux-like ~/.config/pip, by default. - linux_like_path = "~/.config/" - if appname: - linux_like_path = os.path.join(linux_like_path, appname) - - return os.path.expanduser(linux_like_path) - - -def user_config_dir(appname: str, roaming: bool = True) -> str: - if sys.platform == "darwin": - return _macos_user_config_dir(appname, roaming) - - return _appdirs.user_config_dir(appname, appauthor=False, roaming=roaming) - - -# for the discussion regarding site_config_dir locations -# see -def site_config_dirs(appname: str) -> List[str]: - if sys.platform == "darwin": - return [_appdirs.site_data_dir(appname, appauthor=False, multipath=True)] - - dirval = _appdirs.site_config_dir(appname, appauthor=False, multipath=True) - if sys.platform == "win32": - return [dirval] - - # Unix-y system. Look in /etc as well. - return dirval.split(os.pathsep) + ["/etc"] diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/big5freq.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/big5freq.py deleted file mode 100644 index 87d9f972edde20d1f8e391b8010703242a8de977..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/big5freq.py +++ /dev/null @@ -1,386 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# Big5 frequency table -# by Taiwan's Mandarin Promotion Council -# -# -# 128 --> 0.42261 -# 256 --> 0.57851 -# 512 --> 0.74851 -# 1024 --> 0.89384 -# 2048 --> 0.97583 -# -# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98 -# Random Distribution Ration = 512/(5401-512)=0.105 -# -# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR - -BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75 - -# Char to FreqOrder table -BIG5_TABLE_SIZE = 5376 -# fmt: off -BIG5_CHAR_TO_FREQ_ORDER = ( - 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16 -3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32 -1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48 - 63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64 -3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80 -4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96 -5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112 - 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128 - 179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144 - 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160 -2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176 -1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192 -3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208 - 706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224 -1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240 -3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256 -2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272 - 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288 -3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304 -1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320 -5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336 - 266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352 -5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368 -1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384 - 32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400 - 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416 -3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432 -3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448 - 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464 -2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480 -2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496 - 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512 - 
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528 -3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544 -1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560 -1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576 -1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592 -2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608 - 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624 -4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640 -1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656 -5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672 -2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688 - 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704 - 98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720 - 523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736 - 710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752 -5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768 - 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784 -1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800 - 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816 - 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832 -5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848 -1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864 - 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880 -3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896 -4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912 -3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928 - 279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944 - 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960 -1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976 -4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992 -3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008 -3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024 -2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040 -5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056 -3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072 -5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088 -1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104 -2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120 -1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136 - 78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152 
-1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168 -4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184 -3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200 - 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216 - 165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232 - 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248 -2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264 -5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280 -1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296 -2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312 -1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328 -1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344 -5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360 -5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376 -5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392 -3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408 -4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424 -4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440 -2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456 -5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472 -3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488 - 598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504 -5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520 -5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536 -1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552 -2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568 -3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584 -4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600 -5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616 -3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632 -4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648 -1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664 -1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680 -4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696 -1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712 - 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728 -1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744 -1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760 -3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776 - 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792 
-5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808 -2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824 -1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840 -1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856 -5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872 - 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888 -4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904 - 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920 -2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936 - 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952 -1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968 -1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984 - 730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000 -4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016 -4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032 -1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048 -3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064 -5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080 -5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096 -1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112 -2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128 -1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144 -3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160 -2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176 -3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192 -2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208 -4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224 -4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240 -3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256 - 97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272 -3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288 - 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304 -3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320 -4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336 -3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352 -1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368 -5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384 - 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400 -5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416 -1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432 - 
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448 -4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464 -4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480 - 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496 -2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512 -2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528 -3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544 -1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560 -4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576 -2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592 -1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608 -1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624 -2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640 -3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656 -1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672 -5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688 -1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704 -4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720 -1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736 - 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752 -1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768 -4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784 -4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800 -2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816 -1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832 -4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848 - 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864 -5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880 -2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896 -3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912 -4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928 - 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944 -5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960 -5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976 -1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992 -4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008 -4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024 -2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040 -3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056 -3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072 
-2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088 -1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104 -4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120 -3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136 -3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152 -2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168 -4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184 -5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200 -3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216 -2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232 -3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248 -1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264 -2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280 -3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296 -4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312 -2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328 -2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344 -5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360 -1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376 -2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392 -1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408 -3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424 -4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440 -2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456 -3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472 -3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488 -2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504 -4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520 -2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536 -3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552 -4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568 -5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584 -3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600 - 194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616 -1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632 -4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648 -1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664 -4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680 -5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696 - 510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712 
-5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728 -5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744 -2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760 -3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776 -2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792 -2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808 - 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824 -1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840 -4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856 -3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872 -3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888 - 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904 -2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920 - 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936 -2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952 -4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968 -1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984 -4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000 -1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016 -3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032 - 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048 -3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064 -5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080 -5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096 -3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112 -3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128 -1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144 -2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160 -5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176 -1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192 -1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208 -3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224 - 919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240 -1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256 -4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272 -5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288 -2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304 -3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320 - 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336 -1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 
4352 -2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368 -2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384 -5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400 -5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416 -5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432 -2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448 -2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464 -1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480 -4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496 -3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512 -3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528 -4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544 -4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560 -2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576 -2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592 -5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608 -4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624 -5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640 -4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656 - 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672 - 121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688 -1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704 -3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720 -4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736 -1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752 -5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768 -2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784 -2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800 -3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816 -5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832 -1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848 -3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864 -5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880 -1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896 -5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912 -2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928 -3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944 -2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960 -3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976 -3932,1988, 618, 
427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992 -3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008 -4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024 - 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040 -2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056 -4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072 -3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088 -5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104 -1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120 -5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136 - 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152 -1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168 - 479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184 -4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200 -1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216 -4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232 -1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248 - 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264 -3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280 -4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296 -5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312 - 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328 -3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344 - 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360 -2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 -) -# fmt: on diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/version.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/version.py deleted file mode 100644 index d906a2c99e66d8b2e8220dd9a256db2088a4633e..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/version.py +++ /dev/null @@ -1,4 +0,0 @@ -# file generated by setuptools_scm -# don't change, don't track in version control -__version__ = version = '3.2.0' -__version_tuple__ = version_tuple = (3, 2, 0) diff --git a/spaces/Boadiwaa/Recipes/openai/openai_object.py b/spaces/Boadiwaa/Recipes/openai/openai_object.py deleted file mode 100644 index 58e458dfed7b57e6a02fc1136ac2a99d65ee777c..0000000000000000000000000000000000000000 --- a/spaces/Boadiwaa/Recipes/openai/openai_object.py +++ /dev/null @@ -1,294 +0,0 @@ -import json -from copy import deepcopy -from typing import Optional - -import openai -from openai import api_requestor, util -from openai.openai_response import OpenAIResponse -from openai.util import ApiType - - -class OpenAIObject(dict): - api_base_override = None - - def __init__( - self, - id=None, - api_key=None, - api_version=None, - api_type=None, - organization=None, - response_ms: Optional[int] = None, - api_base=None, - engine=None, - 
**params, - ): - super(OpenAIObject, self).__init__() - - if response_ms is not None and not isinstance(response_ms, int): - raise TypeError(f"response_ms is a {type(response_ms).__name__}.") - self._response_ms = response_ms - - self._retrieve_params = params - - object.__setattr__(self, "api_key", api_key) - object.__setattr__(self, "api_version", api_version) - object.__setattr__(self, "api_type", api_type) - object.__setattr__(self, "organization", organization) - object.__setattr__(self, "api_base_override", api_base) - object.__setattr__(self, "engine", engine) - - if id: - self["id"] = id - - @property - def response_ms(self) -> Optional[int]: - return self._response_ms - - def __setattr__(self, k, v): - if k[0] == "_" or k in self.__dict__: - return super(OpenAIObject, self).__setattr__(k, v) - - self[k] = v - return None - - def __getattr__(self, k): - if k[0] == "_": - raise AttributeError(k) - try: - return self[k] - except KeyError as err: - raise AttributeError(*err.args) - - def __delattr__(self, k): - if k[0] == "_" or k in self.__dict__: - return super(OpenAIObject, self).__delattr__(k) - else: - del self[k] - - def __setitem__(self, k, v): - if v == "": - raise ValueError( - "You cannot set %s to an empty string. " - "We interpret empty strings as None in requests." - "You may set %s.%s = None to delete the property" % (k, str(self), k) - ) - super(OpenAIObject, self).__setitem__(k, v) - - def __delitem__(self, k): - raise NotImplementedError("del is not supported") - - # Custom unpickling method that uses `update` to update the dictionary - # without calling __setitem__, which would fail if any value is an empty - # string - def __setstate__(self, state): - self.update(state) - - # Custom pickling method to ensure the instance is pickled as a custom - # class and not as a dict, otherwise __setstate__ would not be called when - # unpickling. - def __reduce__(self): - reduce_value = ( - type(self), # callable - ( # args - self.get("id", None), - self.api_key, - self.api_version, - self.api_type, - self.organization, - ), - dict(self), # state - ) - return reduce_value - - @classmethod - def construct_from( - cls, - values, - api_key: Optional[str] = None, - api_version=None, - organization=None, - engine=None, - response_ms: Optional[int] = None, - ): - instance = cls( - values.get("id"), - api_key=api_key, - api_version=api_version, - organization=organization, - engine=engine, - response_ms=response_ms, - ) - instance.refresh_from( - values, - api_key=api_key, - api_version=api_version, - organization=organization, - response_ms=response_ms, - ) - return instance - - def refresh_from( - self, - values, - api_key=None, - api_version=None, - api_type=None, - organization=None, - response_ms: Optional[int] = None, - ): - self.api_key = api_key or getattr(values, "api_key", None) - self.api_version = api_version or getattr(values, "api_version", None) - self.api_type = api_type or getattr(values, "api_type", None) - self.organization = organization or getattr(values, "organization", None) - self._response_ms = response_ms or getattr(values, "_response_ms", None) - - # Wipe old state before setting new. 
- self.clear() - for k, v in values.items(): - super(OpenAIObject, self).__setitem__( - k, util.convert_to_openai_object(v, api_key, api_version, organization) - ) - - self._previous = values - - @classmethod - def api_base(cls): - return None - - def request( - self, - method, - url, - params=None, - headers=None, - stream=False, - plain_old_data=False, - request_id: Optional[str] = None, - ): - if params is None: - params = self._retrieve_params - requestor = api_requestor.APIRequestor( - key=self.api_key, - api_base=self.api_base_override or self.api_base(), - api_type=self.api_type, - api_version=self.api_version, - organization=self.organization, - ) - response, stream, api_key = requestor.request( - method, - url, - params=params, - stream=stream, - headers=headers, - request_id=request_id, - ) - - if stream: - assert not isinstance(response, OpenAIResponse) # must be an iterator - return ( - util.convert_to_openai_object( - line, - api_key, - self.api_version, - self.organization, - plain_old_data=plain_old_data, - ) - for line in response - ) - else: - return util.convert_to_openai_object( - response, - api_key, - self.api_version, - self.organization, - plain_old_data=plain_old_data, - ) - - def __repr__(self): - ident_parts = [type(self).__name__] - - obj = self.get("object") - if isinstance(obj, str): - ident_parts.append(obj) - - if isinstance(self.get("id"), str): - ident_parts.append("id=%s" % (self.get("id"),)) - - unicode_repr = "<%s at %s> JSON: %s" % ( - " ".join(ident_parts), - hex(id(self)), - str(self), - ) - - return unicode_repr - - def __str__(self): - obj = self.to_dict_recursive() - return json.dumps(obj, sort_keys=True, indent=2) - - def to_dict(self): - return dict(self) - - def to_dict_recursive(self): - d = dict(self) - for k, v in d.items(): - if isinstance(v, OpenAIObject): - d[k] = v.to_dict_recursive() - elif isinstance(v, list): - d[k] = [ - e.to_dict_recursive() if isinstance(e, OpenAIObject) else e - for e in v - ] - return d - - @property - def openai_id(self): - return self.id - - @property - def typed_api_type(self): - return ( - ApiType.from_str(self.api_type) - if self.api_type - else ApiType.from_str(openai.api_type) - ) - - # This class overrides __setitem__ to throw exceptions on inputs that it - # doesn't like. This can cause problems when we try to copy an object - # wholesale because some data that's returned from the API may not be valid - # if it was set to be set manually. Here we override the class' copy - # arguments so that we can bypass these possible exceptions on __setitem__. - def __copy__(self): - copied = OpenAIObject( - self.get("id"), - self.api_key, - api_version=self.api_version, - api_type=self.api_type, - organization=self.organization, - ) - - copied._retrieve_params = self._retrieve_params - - for k, v in self.items(): - # Call parent's __setitem__ to avoid checks that we've added in the - # overridden version that can throw exceptions. - super(OpenAIObject, copied).__setitem__(k, v) - - return copied - - # This class overrides __setitem__ to throw exceptions on inputs that it - # doesn't like. This can cause problems when we try to copy an object - # wholesale because some data that's returned from the API may not be valid - # if it was set to be set manually. Here we override the class' copy - # arguments so that we can bypass these possible exceptions on __setitem__. 
- def __deepcopy__(self, memo): - copied = self.__copy__() - memo[id(self)] = copied - - for k, v in self.items(): - # Call parent's __setitem__ to avoid checks that we've added in the - # overridden version that can throw exceptions. - super(OpenAIObject, copied).__setitem__(k, deepcopy(v, memo)) - - return copied diff --git a/spaces/Bradjan310/ehartford-Wizard-Vicuna-30B-Uncensored/README.md b/spaces/Bradjan310/ehartford-Wizard-Vicuna-30B-Uncensored/README.md deleted file mode 100644 index 1a110856ded2ff2bfc3eebd679140543721879e2..0000000000000000000000000000000000000000 --- a/spaces/Bradjan310/ehartford-Wizard-Vicuna-30B-Uncensored/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Ehartford Wizard Vicuna 30B Uncensored -emoji: 🏃 -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/base_model.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/base_model.py deleted file mode 100644 index e8237bbb2810d05cadaa0cf69f584a45acef2609..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/base_model.py +++ /dev/null @@ -1,60 +0,0 @@ -import torch -import torch.nn as nn -from attention import Attention, NewAttention -from language_model import WordEmbedding, QuestionEmbedding -from classifier import SimpleClassifier -from fc import FCNet - - -class BaseModel(nn.Module): - def __init__(self, w_emb, q_emb, v_att, q_net, v_net, classifier): - super(BaseModel, self).__init__() - self.w_emb = w_emb - self.q_emb = q_emb - self.v_att = v_att - self.q_net = q_net - self.v_net = v_net - self.classifier = classifier - - def forward(self, v, b, q, labels): - """Forward - - v: [batch, num_objs, obj_dim] - b: [batch, num_objs, b_dim] - q: [batch_size, seq_length] - - return: logits, not probs - """ - w_emb = self.w_emb(q) - q_emb = self.q_emb(w_emb) # [batch, q_dim] - - att = self.v_att(v, q_emb) - v_emb = (att * v).sum(1) # [batch, v_dim] - - q_repr = self.q_net(q_emb) - v_repr = self.v_net(v_emb) - joint_repr = q_repr * v_repr - logits = self.classifier(joint_repr) - return logits - - -def build_baseline0(dataset, num_hid): - w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0) - q_emb = QuestionEmbedding(300, num_hid, 1, False, 0.0) - v_att = Attention(dataset.v_dim, q_emb.num_hid, num_hid) - q_net = FCNet([num_hid, num_hid]) - v_net = FCNet([dataset.v_dim, num_hid]) - classifier = SimpleClassifier( - num_hid, 2 * num_hid, dataset.num_ans_candidates, 0.5) - return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier) - - -def build_baseline0_newatt(dataset, num_hid): - w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0) - q_emb = QuestionEmbedding(300, num_hid, 1, False, 0.0) - v_att = NewAttention(dataset.v_dim, q_emb.num_hid, num_hid) - q_net = FCNet([q_emb.num_hid, num_hid]) - v_net = FCNet([dataset.v_dim, num_hid]) - classifier = SimpleClassifier( - num_hid, num_hid * 2, dataset.num_ans_candidates, 0.5) - return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier) diff --git a/spaces/CVPR/lama-example/models/ade20k/segm_lib/utils/data/distributed.py b/spaces/CVPR/lama-example/models/ade20k/segm_lib/utils/data/distributed.py deleted file mode 100644 index c3d890e28fd2b9e044bdd9494de4a43ad2471eed..0000000000000000000000000000000000000000 --- 
a/spaces/CVPR/lama-example/models/ade20k/segm_lib/utils/data/distributed.py +++ /dev/null @@ -1,58 +0,0 @@ -import math -import torch -from .sampler import Sampler -from torch.distributed import get_world_size, get_rank - - -class DistributedSampler(Sampler): - """Sampler that restricts data loading to a subset of the dataset. - - It is especially useful in conjunction with - :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each - process can pass a DistributedSampler instance as a DataLoader sampler, - and load a subset of the original dataset that is exclusive to it. - - .. note:: - Dataset is assumed to be of constant size. - - Arguments: - dataset: Dataset used for sampling. - num_replicas (optional): Number of processes participating in - distributed training. - rank (optional): Rank of the current process within num_replicas. - """ - - def __init__(self, dataset, num_replicas=None, rank=None): - if num_replicas is None: - num_replicas = get_world_size() - if rank is None: - rank = get_rank() - self.dataset = dataset - self.num_replicas = num_replicas - self.rank = rank - self.epoch = 0 - self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) - self.total_size = self.num_samples * self.num_replicas - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - indices = list(torch.randperm(len(self.dataset), generator=g)) - - # add extra samples to make it evenly divisible - indices += indices[:(self.total_size - len(indices))] - assert len(indices) == self.total_size - - # subsample - offset = self.num_samples * self.rank - indices = indices[offset:offset + self.num_samples] - assert len(indices) == self.num_samples - - return iter(indices) - - def __len__(self): - return self.num_samples - - def set_epoch(self, epoch): - self.epoch = epoch diff --git a/spaces/CVPR/regionclip-demo/detectron2/export/caffe2_inference.py b/spaces/CVPR/regionclip-demo/detectron2/export/caffe2_inference.py deleted file mode 100644 index deb886c0417285ed1d5ad85eb941fa1ac757cdab..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/export/caffe2_inference.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import logging -import numpy as np -from itertools import count -import torch -from caffe2.proto import caffe2_pb2 -from caffe2.python import core - -from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format -from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type - -logger = logging.getLogger(__name__) - - -# ===== ref: mobile-vision predictor's 'Caffe2Wrapper' class ====== -class ProtobufModel(torch.nn.Module): - """ - Wrapper of a caffe2's protobuf model. - It works just like nn.Module, but running caffe2 under the hood. - Input/Output are tuple[tensor] that match the caffe2 net's external_input/output. 
- """ - - _ids = count(0) - - def __init__(self, predict_net, init_net): - logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...") - super().__init__() - assert isinstance(predict_net, caffe2_pb2.NetDef) - assert isinstance(init_net, caffe2_pb2.NetDef) - # create unique temporary workspace for each instance - self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids)) - self.net = core.Net(predict_net) - - logger.info("Running init_net once to fill the parameters ...") - with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws: - ws.RunNetOnce(init_net) - uninitialized_external_input = [] - for blob in self.net.Proto().external_input: - if blob not in ws.Blobs(): - uninitialized_external_input.append(blob) - ws.CreateBlob(blob) - ws.CreateNet(self.net) - - self._error_msgs = set() - self._input_blobs = uninitialized_external_input - - def _infer_output_devices(self, inputs): - """ - Returns: - list[str]: list of device for each external output - """ - - def _get_device_type(torch_tensor): - assert torch_tensor.device.type in ["cpu", "cuda"] - assert torch_tensor.device.index == 0 - return torch_tensor.device.type - - predict_net = self.net.Proto() - input_device_types = { - (name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs) - } - device_type_map = infer_device_type( - predict_net, known_status=input_device_types, device_name_style="pytorch" - ) - ssa, versions = core.get_ssa(predict_net) - versioned_outputs = [(name, versions[name]) for name in predict_net.external_output] - output_devices = [device_type_map[outp] for outp in versioned_outputs] - return output_devices - - def forward(self, inputs): - """ - Args: - inputs (tuple[torch.Tensor]) - - Returns: - tuple[torch.Tensor] - """ - assert len(inputs) == len(self._input_blobs), ( - f"Length of inputs ({len(inputs)}) " - f"doesn't match the required input blobs: {self._input_blobs}" - ) - - with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws: - for b, tensor in zip(self._input_blobs, inputs): - ws.FeedBlob(b, tensor) - - try: - ws.RunNet(self.net.Proto().name) - except RuntimeError as e: - if not str(e) in self._error_msgs: - self._error_msgs.add(str(e)) - logger.warning("Encountered new RuntimeError: \n{}".format(str(e))) - logger.warning("Catch the error and use partial results.") - - c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output] - # Remove outputs of current run, this is necessary in order to - # prevent fetching the result from previous run if the model fails - # in the middle. - for b in self.net.Proto().external_output: - # Needs to create uninitialized blob to make the net runable. - # This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b), - # but there'no such API. 
- ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).") - - # Cast output to torch.Tensor on the desired device - output_devices = ( - self._infer_output_devices(inputs) - if any(t.device.type != "cpu" for t in inputs) - else ["cpu" for _ in self.net.Proto().external_output] - ) - - outputs = [] - for name, c2_output, device in zip( - self.net.Proto().external_output, c2_outputs, output_devices - ): - if not isinstance(c2_output, np.ndarray): - raise RuntimeError( - "Invalid output for blob {}, received: {}".format(name, c2_output) - ) - outputs.append(torch.tensor(c2_output).to(device=device)) - return tuple(outputs) - - -class ProtobufDetectionModel(torch.nn.Module): - """ - A class works just like a pytorch meta arch in terms of inference, but running - caffe2 model under the hood. - """ - - def __init__(self, predict_net, init_net, *, convert_outputs=None): - """ - Args: - predict_net, init_net (core.Net): caffe2 nets - convert_outptus (callable): a function that converts caffe2 - outputs to the same format of the original pytorch model. - By default, use the one defined in the caffe2 meta_arch. - """ - super().__init__() - self.protobuf_model = ProtobufModel(predict_net, init_net) - self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0) - self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii") - - if convert_outputs is None: - meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN") - meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")] - self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net) - else: - self._convert_outputs = convert_outputs - - def _convert_inputs(self, batched_inputs): - # currently all models convert inputs in the same way - return convert_batched_inputs_to_c2_format( - batched_inputs, self.size_divisibility, self.device - ) - - def forward(self, batched_inputs): - c2_inputs = self._convert_inputs(batched_inputs) - c2_results = self.protobuf_model(c2_inputs) - c2_results = dict(zip(self.protobuf_model.net.Proto().external_output, c2_results)) - return self._convert_outputs(batched_inputs, c2_inputs, c2_results) diff --git a/spaces/CVPR/regionclip-demo/detectron2/utils/collect_env.py b/spaces/CVPR/regionclip-demo/detectron2/utils/collect_env.py deleted file mode 100644 index a2359d332124b49024c5ae59fe3a4a51a92d181b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/utils/collect_env.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import importlib -import numpy as np -import os -import re -import subprocess -import sys -from collections import defaultdict -import PIL -import torch -import torchvision -from tabulate import tabulate - -__all__ = ["collect_env_info"] - - -def collect_torch_env(): - try: - import torch.__config__ - - return torch.__config__.show() - except ImportError: - # compatible with older versions of pytorch - from torch.utils.collect_env import get_pretty_env_info - - return get_pretty_env_info() - - -def get_env_module(): - var_name = "DETECTRON2_ENV_MODULE" - return var_name, os.environ.get(var_name, "") - - -def detect_compute_compatibility(CUDA_HOME, so_file): - try: - cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump") - if os.path.isfile(cuobjdump): - output = subprocess.check_output( - "'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True - ) - output = output.decode("utf-8").strip().split("\n") - arch = [] - for line in output: - line = re.findall(r"\.sm_([0-9]*)\.", line)[0] - arch.append(".".join(line)) - arch = sorted(set(arch)) - return ", ".join(arch) - else: - return so_file + "; cannot find cuobjdump" - except Exception: - # unhandled failure - return so_file - - -def collect_env_info(): - has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM - torch_version = torch.__version__ - - # NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional - from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME - - has_rocm = False - if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None): - has_rocm = True - has_cuda = has_gpu and (not has_rocm) - - data = [] - data.append(("sys.platform", sys.platform)) # check-template.yml depends on it - data.append(("Python", sys.version.replace("\n", ""))) - data.append(("numpy", np.__version__)) - - try: - import detectron2 # noqa - - data.append( - ("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__)) - ) - except ImportError: - data.append(("detectron2", "failed to import")) - except AttributeError: - data.append(("detectron2", "imported a wrong installation")) - - try: - import detectron2._C as _C - except ImportError as e: - data.append(("detectron2._C", f"not built correctly: {e}")) - - # print system compilers when extension fails to build - if sys.platform != "win32": # don't know what to do for windows - try: - # this is how torch/utils/cpp_extensions.py choose compiler - cxx = os.environ.get("CXX", "c++") - cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True) - cxx = cxx.decode("utf-8").strip().split("\n")[0] - except subprocess.SubprocessError: - cxx = "Not found" - data.append(("Compiler ($CXX)", cxx)) - - if has_cuda and CUDA_HOME is not None: - try: - nvcc = os.path.join(CUDA_HOME, "bin", "nvcc") - nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True) - nvcc = nvcc.decode("utf-8").strip().split("\n")[-1] - except subprocess.SubprocessError: - nvcc = "Not found" - data.append(("CUDA compiler", nvcc)) - if has_cuda and sys.platform != "win32": - try: - so_file = importlib.util.find_spec("detectron2._C").origin - except (ImportError, AttributeError): - pass - else: - data.append( - ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, so_file)) - ) - else: - # print compilers that are used to build extension - data.append(("Compiler", _C.get_compiler_version())) - data.append(("CUDA compiler", _C.get_cuda_version())) # cuda or hip - if has_cuda and getattr(_C, "has_cuda", lambda: True)(): - 
data.append( - ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__)) - ) - - data.append(get_env_module()) - data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__))) - data.append(("PyTorch debug build", torch.version.debug)) - - data.append(("GPU available", has_gpu)) - if has_gpu: - devices = defaultdict(list) - for k in range(torch.cuda.device_count()): - cap = ".".join((str(x) for x in torch.cuda.get_device_capability(k))) - name = torch.cuda.get_device_name(k) + f" (arch={cap})" - devices[name].append(str(k)) - for name, devids in devices.items(): - data.append(("GPU " + ",".join(devids), name)) - - if has_rocm: - msg = " - invalid!" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else "" - data.append(("ROCM_HOME", str(ROCM_HOME) + msg)) - else: - msg = " - invalid!" if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else "" - data.append(("CUDA_HOME", str(CUDA_HOME) + msg)) - - cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None) - if cuda_arch_list: - data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list)) - data.append(("Pillow", PIL.__version__)) - - try: - data.append( - ( - "torchvision", - str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__), - ) - ) - if has_cuda: - try: - torchvision_C = importlib.util.find_spec("torchvision._C").origin - msg = detect_compute_compatibility(CUDA_HOME, torchvision_C) - data.append(("torchvision arch flags", msg)) - except (ImportError, AttributeError): - data.append(("torchvision._C", "Not found")) - except AttributeError: - data.append(("torchvision", "unknown")) - - try: - import fvcore - - data.append(("fvcore", fvcore.__version__)) - except (ImportError, AttributeError): - pass - - try: - import iopath - - data.append(("iopath", iopath.__version__)) - except (ImportError, AttributeError): - pass - - try: - import cv2 - - data.append(("cv2", cv2.__version__)) - except (ImportError, AttributeError): - data.append(("cv2", "Not found")) - env_str = tabulate(data) + "\n" - env_str += collect_torch_env() - return env_str - - -if __name__ == "__main__": - try: - from detectron2.utils.collect_env import collect_env_info as f - - print(f()) - except ImportError: - print(collect_env_info()) - - if torch.cuda.is_available(): - for k in range(torch.cuda.device_count()): - device = f"cuda:{k}" - try: - x = torch.tensor([1, 2.0], dtype=torch.float32) - x = x.to(device) - except Exception as e: - print( - f"Unable to copy tensor to device={device}: {e}. " - "Your CUDA environment is broken." - ) diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/__init__.py b/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/__init__.py deleted file mode 100644 index 2af819d61d589cfec2e0ca46612a7456f42b831a..0000000000000000000000000000000000000000 --- a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Conditional DETR -# Copyright (c) 2021 Microsoft. All Rights Reserved. 
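To make the arch-flag parsing in `detect_compute_compatibility` (from the deleted `collect_env.py` above) concrete, here is a small illustration. The sample `cuobjdump --list-elf` output line is fabricated, but the regex and join behaviour mirror the code above: the digits captured from `.sm_75.` are re-joined into the compute-capability string `7.5`.

```python
import re

# Fabricated example of a cuobjdump --list-elf output line.
sample = "ELF file    1: detectron2._C.1.sm_75.cubin"
digits = re.findall(r"\.sm_([0-9]*)\.", sample)[0]  # -> "75"
arch = ".".join(digits)                             # -> "7.5"
print(arch)
```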
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Copied from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# ------------------------------------------------------------------------ - -from .groundingdino import build_groundingdino diff --git a/spaces/Cherrycreamco/webui/oh-no.py b/spaces/Cherrycreamco/webui/oh-no.py deleted file mode 100644 index e8c0f3bd8d72805b4ee69d4d0fd9133347d00f92..0000000000000000000000000000000000000000 --- a/spaces/Cherrycreamco/webui/oh-no.py +++ /dev/null @@ -1,14 +0,0 @@ -import gradio as gr - -block = gr.Blocks() - -def run(): - with block: - gr.Markdown( - """ -

      oh no 😐 something wrong with the 🤗 hugging face servers 😐 hopefully, it will be fixed soon

      - """) - block.launch(server_name="0.0.0.0", server_port=7860) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/cldm/hack.py b/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/cldm/hack.py deleted file mode 100644 index 454361e9d036cd1a6a79122c2fd16b489e4767b1..0000000000000000000000000000000000000000 --- a/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/cldm/hack.py +++ /dev/null @@ -1,111 +0,0 @@ -import torch -import einops - -import ldm.modules.encoders.modules -import ldm.modules.attention - -from transformers import logging -from ldm.modules.attention import default - - -def disable_verbosity(): - logging.set_verbosity_error() - print('logging improved.') - return - - -def enable_sliced_attention(): - ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward - print('Enabled sliced_attention.') - return - - -def hack_everything(clip_skip=0): - disable_verbosity() - ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward - ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip - print('Enabled clip hacks.') - return - - -# Written by Lvmin -def _hacked_clip_forward(self, text): - PAD = self.tokenizer.pad_token_id - EOS = self.tokenizer.eos_token_id - BOS = self.tokenizer.bos_token_id - - def tokenize(t): - return self.tokenizer(t, truncation=False, add_special_tokens=False)["input_ids"] - - def transformer_encode(t): - if self.clip_skip > 1: - rt = self.transformer(input_ids=t, output_hidden_states=True) - return self.transformer.text_model.final_layer_norm(rt.hidden_states[-self.clip_skip]) - else: - return self.transformer(input_ids=t, output_hidden_states=False).last_hidden_state - - def split(x): - return x[75 * 0: 75 * 1], x[75 * 1: 75 * 2], x[75 * 2: 75 * 3] - - def pad(x, p, i): - return x[:i] if len(x) >= i else x + [p] * (i - len(x)) - - raw_tokens_list = tokenize(text) - tokens_list = [] - - for raw_tokens in raw_tokens_list: - raw_tokens_123 = split(raw_tokens) - raw_tokens_123 = [[BOS] + raw_tokens_i + [EOS] for raw_tokens_i in raw_tokens_123] - raw_tokens_123 = [pad(raw_tokens_i, PAD, 77) for raw_tokens_i in raw_tokens_123] - tokens_list.append(raw_tokens_123) - - tokens_list = torch.IntTensor(tokens_list).to(self.device) - - feed = einops.rearrange(tokens_list, 'b f i -> (b f) i') - y = transformer_encode(feed) - z = einops.rearrange(y, '(b f) i c -> b (f i) c', f=3) - - return z - - -# Stolen from https://github.com/basujindal/stable-diffusion/blob/main/optimizedSD/splitAttention.py -def _hacked_sliced_attentin_forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - del context, x - - q, k, v = map(lambda t: einops.rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - limit = k.shape[0] - att_step = 1 - q_chunks = list(torch.tensor_split(q, limit // att_step, dim=0)) - k_chunks = list(torch.tensor_split(k, limit // att_step, dim=0)) - v_chunks = list(torch.tensor_split(v, limit // att_step, dim=0)) - - q_chunks.reverse() - k_chunks.reverse() - v_chunks.reverse() - sim = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device) - del k, q, v - for i in range(0, limit, att_step): - q_buffer = q_chunks.pop() - k_buffer = k_chunks.pop() - v_buffer = v_chunks.pop() - sim_buffer = torch.einsum('b i d, b j d -> b i j', q_buffer, k_buffer) * self.scale - - del k_buffer, q_buffer - # attention, what we cannot get enough of, 
by chunks - - sim_buffer = sim_buffer.softmax(dim=-1) - - sim_buffer = torch.einsum('b i j, b j d -> b i d', sim_buffer, v_buffer) - del v_buffer - sim[i:i + att_step, :, :] = sim_buffer - - del sim_buffer - sim = einops.rearrange(sim, '(b h) n d -> b n (h d)', h=h) - return self.to_out(sim) diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/utils/model_serialization.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/utils/model_serialization.py deleted file mode 100644 index a95ad8b2a7a787d62dc3ea580b2dfd30e358da28..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/utils/model_serialization.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -from collections import OrderedDict -import logging - -import torch - -from maskrcnn_benchmark.utils.imports import import_file - - -def align_and_update_state_dicts(model_state_dict, loaded_state_dict): - """ - Strategy: suppose that the models that we will create will have prefixes appended - to each of its keys, for example due to an extra level of nesting that the original - pre-trained weights from ImageNet won't contain. For example, model.state_dict() - might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains - res2.conv1.weight. We thus want to match both parameters together. - For that, we look for each model weight, look among all loaded keys if there is one - that is a suffix of the current weight name, and use it if that's the case. - If multiple matches exist, take the one with longest size - of the corresponding name. For example, for the same model as before, the pretrained - weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, - we want to match backbone[0].body.conv1.weight to conv1.weight, and - backbone[0].body.res2.conv1.weight to res2.conv1.weight. 
- """ - current_keys = sorted(list(model_state_dict.keys())) - loaded_keys = sorted(list(loaded_state_dict.keys())) - # get a matrix of string matches, where each (i, j) entry correspond to the size of the - # loaded_key string, if it matches - match_matrix = [ - len(j) if i.endswith(j) else 0 for i in current_keys for j in loaded_keys - ] - match_matrix = torch.as_tensor(match_matrix).view( - len(current_keys), len(loaded_keys) - ) - max_match_size, idxs = match_matrix.max(1) - # remove indices that correspond to no-match - idxs[max_match_size == 0] = -1 - - # used for logging - max_size = max([len(key) for key in current_keys]) if current_keys else 1 - max_size_loaded = max([len(key) for key in loaded_keys]) if loaded_keys else 1 - log_str_template = "{: <{}} loaded from {: <{}} of shape {}" - logger = logging.getLogger(__name__) - for idx_new, idx_old in enumerate(idxs.tolist()): - if idx_old == -1: - continue - key = current_keys[idx_new] - key_old = loaded_keys[idx_old] - model_state_dict[key] = loaded_state_dict[key_old] - logger.info( - log_str_template.format( - key, - max_size, - key_old, - max_size_loaded, - tuple(loaded_state_dict[key_old].shape), - ) - ) - - -def strip_prefix_if_present(state_dict, prefix): - keys = sorted(state_dict.keys()) - if not all(key.startswith(prefix) for key in keys): - return state_dict - stripped_state_dict = OrderedDict() - for key, value in state_dict.items(): - stripped_state_dict[key.replace(prefix, "")] = value - return stripped_state_dict - - -def load_state_dict(model, loaded_state_dict): - model_state_dict = model.state_dict() - # if the state_dict comes from a model that was wrapped in a - # DataParallel or DistributedDataParallel during serialization, - # remove the "module" prefix before performing the matching - loaded_state_dict = strip_prefix_if_present(loaded_state_dict, prefix="module.") - align_and_update_state_dicts(model_state_dict, loaded_state_dict) - - # use strict loading - model.load_state_dict(model_state_dict) diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/blip2_outputs.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/blip2_outputs.py deleted file mode 100644 index 92d83a0556e6c5c3c0a603279f318605ae25d6d5..0000000000000000000000000000000000000000 --- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/blip2_outputs.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -Adapted from salesforce@LAVIS. Below is the original copyright: - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -from dataclasses import dataclass -from typing import Optional - -import torch -from transformers.modeling_outputs import ( - ModelOutput, - BaseModelOutputWithPoolingAndCrossAttentions, - CausalLMOutputWithCrossAttentions, -) - - -@dataclass -class BlipSimilarity(ModelOutput): - sim_i2t: torch.FloatTensor = None - sim_t2i: torch.FloatTensor = None - - sim_i2t_m: Optional[torch.FloatTensor] = None - sim_t2i_m: Optional[torch.FloatTensor] = None - - sim_i2t_targets: Optional[torch.FloatTensor] = None - sim_t2i_targets: Optional[torch.FloatTensor] = None - - -@dataclass -class BlipIntermediateOutput(ModelOutput): - """ - Data class for intermediate outputs of BLIP models. - - image_embeds (torch.FloatTensor): Image embeddings, shape (batch_size, num_patches, embed_dim). - text_embeds (torch.FloatTensor): Text embeddings, shape (batch_size, seq_len, embed_dim). 
- - image_embeds_m (torch.FloatTensor): Image embeddings from momentum visual encoder, shape (batch_size, num_patches, embed_dim). - text_embeds_m (torch.FloatTensor): Text embeddings from momentum text encoder, shape (batch_size, seq_len, embed_dim). - - encoder_output (BaseModelOutputWithPoolingAndCrossAttentions): output from the image-grounded text encoder. - encoder_output_neg (BaseModelOutputWithPoolingAndCrossAttentions): output from the image-grounded text encoder for negative pairs. - - decoder_output (CausalLMOutputWithCrossAttentions): output from the image-grounded text decoder. - decoder_labels (torch.LongTensor): labels for the captioning loss. - - itm_logits (torch.FloatTensor): logits for the image-text matching loss, shape (batch_size * 3, 2). - itm_labels (torch.LongTensor): labels for the image-text matching loss, shape (batch_size * 3,) - - """ - - # uni-modal features - image_embeds: torch.FloatTensor = None - text_embeds: Optional[torch.FloatTensor] = None - - image_embeds_m: Optional[torch.FloatTensor] = None - text_embeds_m: Optional[torch.FloatTensor] = None - - # intermediate outputs of multimodal encoder - encoder_output: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None - encoder_output_neg: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None - - itm_logits: Optional[torch.FloatTensor] = None - itm_labels: Optional[torch.LongTensor] = None - - # intermediate outputs of multimodal decoder - decoder_output: Optional[CausalLMOutputWithCrossAttentions] = None - decoder_labels: Optional[torch.LongTensor] = None - - -@dataclass -class BlipOutput(ModelOutput): - # some finetuned models (e.g. BlipVQA) do not compute similarity, thus optional. - sims: Optional[BlipSimilarity] = None - - intermediate_output: BlipIntermediateOutput = None - - loss: Optional[torch.FloatTensor] = None - - loss_itc: Optional[torch.FloatTensor] = None - - loss_itm: Optional[torch.FloatTensor] = None - - loss_lm: Optional[torch.FloatTensor] = None - - -@dataclass -class BlipOutputFeatures(ModelOutput): - """ - Data class of features from BlipFeatureExtractor. - - Args: - image_embeds: (torch.FloatTensor) of shape (batch_size, num_patches+1, embed_dim), optional - image_features: (torch.FloatTensor) of shape (batch_size, num_patches+1, feature_dim), optional - text_embeds: (torch.FloatTensor) of shape (batch_size, sequence_length+1, embed_dim), optional - text_features: (torch.FloatTensor) of shape (batch_size, sequence_length+1, feature_dim), optional - - The first embedding or feature is for the [CLS] token. - - Features are obtained by projecting the corresponding embedding into a normalized low-dimensional space. 
- """ - - image_embeds: Optional[torch.FloatTensor] = None - image_embeds_proj: Optional[torch.FloatTensor] = None - - text_embeds: Optional[torch.FloatTensor] = None - text_embeds_proj: Optional[torch.FloatTensor] = None - - multimodal_embeds: Optional[torch.FloatTensor] = None diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/filelock/_util.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/filelock/_util.py deleted file mode 100644 index 3d95731f4f0bc247e2bf73a5f1ddbcc590eb471d..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/filelock/_util.py +++ /dev/null @@ -1,37 +0,0 @@ -from __future__ import annotations - -import os -import stat -import sys -from errno import EACCES, EISDIR - - -def raise_on_not_writable_file(filename: str) -> None: - """ - Raise an exception if attempting to open the file for writing would fail. - This is done so files that will never be writable can be separated from - files that are writable but currently locked - :param filename: file to check - :raises OSError: as if the file was opened for writing. - """ - try: # use stat to do exists + can write to check without race condition - file_stat = os.stat(filename) # noqa: PTH116 - except OSError: - return # swallow does not exist or other errors - - if file_stat.st_mtime != 0: # if os.stat returns but modification is zero that's an invalid os.stat - ignore it - if not (file_stat.st_mode & stat.S_IWUSR): - raise PermissionError(EACCES, "Permission denied", filename) - - if stat.S_ISDIR(file_stat.st_mode): - if sys.platform == "win32": # pragma: win32 cover - # On Windows, this is PermissionError - raise PermissionError(EACCES, "Permission denied", filename) - else: # pragma: win32 no cover # noqa: RET506 - # On linux / macOS, this is IsADirectoryError - raise IsADirectoryError(EISDIR, "Is a directory", filename) - - -__all__ = [ - "raise_on_not_writable_file", -] diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/C_P_A_L_.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/C_P_A_L_.py deleted file mode 100644 index 03eb851e8c02edc509e8f1f3681dca5b5b740145..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/C_P_A_L_.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Behdad Esfahbod - -from fontTools.misc.textTools import bytesjoin, safeEval -from . 
import DefaultTable -import array -from collections import namedtuple -import struct -import sys - - -class table_C_P_A_L_(DefaultTable.DefaultTable): - - NO_NAME_ID = 0xFFFF - DEFAULT_PALETTE_TYPE = 0 - - def __init__(self, tag=None): - DefaultTable.DefaultTable.__init__(self, tag) - self.palettes = [] - self.paletteTypes = [] - self.paletteLabels = [] - self.paletteEntryLabels = [] - - def decompile(self, data, ttFont): - ( - self.version, - self.numPaletteEntries, - numPalettes, - numColorRecords, - goffsetFirstColorRecord, - ) = struct.unpack(">HHHHL", data[:12]) - assert ( - self.version <= 1 - ), "Version of CPAL table is higher than I know how to handle" - self.palettes = [] - pos = 12 - for i in range(numPalettes): - startIndex = struct.unpack(">H", data[pos : pos + 2])[0] - assert startIndex + self.numPaletteEntries <= numColorRecords - pos += 2 - palette = [] - ppos = goffsetFirstColorRecord + startIndex * 4 - for j in range(self.numPaletteEntries): - palette.append(Color(*struct.unpack(">BBBB", data[ppos : ppos + 4]))) - ppos += 4 - self.palettes.append(palette) - if self.version == 0: - offsetToPaletteTypeArray = 0 - offsetToPaletteLabelArray = 0 - offsetToPaletteEntryLabelArray = 0 - else: - pos = 12 + numPalettes * 2 - ( - offsetToPaletteTypeArray, - offsetToPaletteLabelArray, - offsetToPaletteEntryLabelArray, - ) = struct.unpack(">LLL", data[pos : pos + 12]) - self.paletteTypes = self._decompileUInt32Array( - data, - offsetToPaletteTypeArray, - numPalettes, - default=self.DEFAULT_PALETTE_TYPE, - ) - self.paletteLabels = self._decompileUInt16Array( - data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID - ) - self.paletteEntryLabels = self._decompileUInt16Array( - data, - offsetToPaletteEntryLabelArray, - self.numPaletteEntries, - default=self.NO_NAME_ID, - ) - - def _decompileUInt16Array(self, data, offset, numElements, default=0): - if offset == 0: - return [default] * numElements - result = array.array("H", data[offset : offset + 2 * numElements]) - if sys.byteorder != "big": - result.byteswap() - assert len(result) == numElements, result - return result.tolist() - - def _decompileUInt32Array(self, data, offset, numElements, default=0): - if offset == 0: - return [default] * numElements - result = array.array("I", data[offset : offset + 4 * numElements]) - if sys.byteorder != "big": - result.byteswap() - assert len(result) == numElements, result - return result.tolist() - - def compile(self, ttFont): - colorRecordIndices, colorRecords = self._compileColorRecords() - paletteTypes = self._compilePaletteTypes() - paletteLabels = self._compilePaletteLabels() - paletteEntryLabels = self._compilePaletteEntryLabels() - numColorRecords = len(colorRecords) // 4 - offsetToFirstColorRecord = 12 + len(colorRecordIndices) - if self.version >= 1: - offsetToFirstColorRecord += 12 - header = struct.pack( - ">HHHHL", - self.version, - self.numPaletteEntries, - len(self.palettes), - numColorRecords, - offsetToFirstColorRecord, - ) - if self.version == 0: - dataList = [header, colorRecordIndices, colorRecords] - else: - pos = offsetToFirstColorRecord + len(colorRecords) - if len(paletteTypes) == 0: - offsetToPaletteTypeArray = 0 - else: - offsetToPaletteTypeArray = pos - pos += len(paletteTypes) - if len(paletteLabels) == 0: - offsetToPaletteLabelArray = 0 - else: - offsetToPaletteLabelArray = pos - pos += len(paletteLabels) - if len(paletteEntryLabels) == 0: - offsetToPaletteEntryLabelArray = 0 - else: - offsetToPaletteEntryLabelArray = pos - pos += len(paletteLabels) - 
header1 = struct.pack( - ">LLL", - offsetToPaletteTypeArray, - offsetToPaletteLabelArray, - offsetToPaletteEntryLabelArray, - ) - dataList = [ - header, - colorRecordIndices, - header1, - colorRecords, - paletteTypes, - paletteLabels, - paletteEntryLabels, - ] - return bytesjoin(dataList) - - def _compilePalette(self, palette): - assert len(palette) == self.numPaletteEntries - pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha) - return bytesjoin([pack(color) for color in palette]) - - def _compileColorRecords(self): - colorRecords, colorRecordIndices, pool = [], [], {} - for palette in self.palettes: - packedPalette = self._compilePalette(palette) - if packedPalette in pool: - index = pool[packedPalette] - else: - index = len(colorRecords) - colorRecords.append(packedPalette) - pool[packedPalette] = index - colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries)) - return bytesjoin(colorRecordIndices), bytesjoin(colorRecords) - - def _compilePaletteTypes(self): - if self.version == 0 or not any(self.paletteTypes): - return b"" - assert len(self.paletteTypes) == len(self.palettes) - result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes]) - assert len(result) == 4 * len(self.palettes) - return result - - def _compilePaletteLabels(self): - if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels): - return b"" - assert len(self.paletteLabels) == len(self.palettes) - result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels]) - assert len(result) == 2 * len(self.palettes) - return result - - def _compilePaletteEntryLabels(self): - if self.version == 0 or all( - l == self.NO_NAME_ID for l in self.paletteEntryLabels - ): - return b"" - assert len(self.paletteEntryLabels) == self.numPaletteEntries - result = bytesjoin( - [struct.pack(">H", label) for label in self.paletteEntryLabels] - ) - assert len(result) == 2 * self.numPaletteEntries - return result - - def toXML(self, writer, ttFont): - numPalettes = len(self.palettes) - paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)} - paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)} - writer.simpletag("version", value=self.version) - writer.newline() - writer.simpletag("numPaletteEntries", value=self.numPaletteEntries) - writer.newline() - for index, palette in enumerate(self.palettes): - attrs = {"index": index} - paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE) - paletteLabel = paletteLabels.get(index, self.NO_NAME_ID) - if self.version > 0 and paletteLabel != self.NO_NAME_ID: - attrs["label"] = paletteLabel - if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE: - attrs["type"] = paletteType - writer.begintag("palette", **attrs) - writer.newline() - if ( - self.version > 0 - and paletteLabel != self.NO_NAME_ID - and ttFont - and "name" in ttFont - ): - name = ttFont["name"].getDebugName(paletteLabel) - if name is not None: - writer.comment(name) - writer.newline() - assert len(palette) == self.numPaletteEntries - for cindex, color in enumerate(palette): - color.toXML(writer, ttFont, cindex) - writer.endtag("palette") - writer.newline() - if self.version > 0 and not all( - l == self.NO_NAME_ID for l in self.paletteEntryLabels - ): - writer.begintag("paletteEntryLabels") - writer.newline() - for index, label in enumerate(self.paletteEntryLabels): - if label != self.NO_NAME_ID: - writer.simpletag("label", index=index, value=label) - if self.version > 0 and label and ttFont and 
"name" in ttFont: - name = ttFont["name"].getDebugName(label) - if name is not None: - writer.comment(name) - writer.newline() - writer.endtag("paletteEntryLabels") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "palette": - self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID))) - self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE))) - palette = [] - for element in content: - if isinstance(element, str): - continue - attrs = element[1] - color = Color.fromHex(attrs["value"]) - palette.append(color) - self.palettes.append(palette) - elif name == "paletteEntryLabels": - colorLabels = {} - for element in content: - if isinstance(element, str): - continue - elementName, elementAttr, _ = element - if elementName == "label": - labelIndex = safeEval(elementAttr["index"]) - nameID = safeEval(elementAttr["value"]) - colorLabels[labelIndex] = nameID - self.paletteEntryLabels = [ - colorLabels.get(i, self.NO_NAME_ID) - for i in range(self.numPaletteEntries) - ] - elif "value" in attrs: - value = safeEval(attrs["value"]) - setattr(self, name, value) - if name == "numPaletteEntries": - self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries - - -class Color(namedtuple("Color", "blue green red alpha")): - def hex(self): - return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha) - - def __repr__(self): - return self.hex() - - def toXML(self, writer, ttFont, index=None): - writer.simpletag("color", value=self.hex(), index=index) - writer.newline() - - @classmethod - def fromHex(cls, value): - if value[0] == "#": - value = value[1:] - red = int(value[0:2], 16) - green = int(value[2:4], 16) - blue = int(value[4:6], 16) - alpha = int(value[6:8], 16) if len(value) >= 8 else 0xFF - return cls(red=red, green=green, blue=blue, alpha=alpha) - - @classmethod - def fromRGBA(cls, red, green, blue, alpha): - return cls(red=red, green=green, blue=blue, alpha=alpha) diff --git a/spaces/DaCuteRaccoon/dalle-mini/index.html b/spaces/DaCuteRaccoon/dalle-mini/index.html deleted file mode 100644 index fdfd83b76c6b2371a100ead6d8fcc90db8f74256..0000000000000000000000000000000000000000 --- a/spaces/DaCuteRaccoon/dalle-mini/index.html +++ /dev/null @@ -1,295 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - -
      - - - diff --git a/spaces/Dagfinn1962/stablediffusion-members/images.py b/spaces/Dagfinn1962/stablediffusion-members/images.py deleted file mode 100644 index b7eff8335d63acef2be9bd688efcdca123149a50..0000000000000000000000000000000000000000 --- a/spaces/Dagfinn1962/stablediffusion-members/images.py +++ /dev/null @@ -1,22 +0,0 @@ -import glob -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -%matplotlib inline - -images = [] -for img_path in sorted(glob.glob('brain.png'), reverse=True): - images.append(mpimg.imread(img_path)) - -images = images[:15] - -plt.figure(figsize=(20,10)) - -columns = 5 -for i, image in enumerate(images): - ax = plt.subplot(len(images) / columns + 1, columns, i + 1) - ax.axes.xaxis.set_visible(False) - ax.axes.yaxis.set_visible(False) - ax.axis('off') - plt.imshow(image) - gc.collect() - diff --git a/spaces/Datasculptor/MusicGen/audiocraft/models/musicgen.py b/spaces/Datasculptor/MusicGen/audiocraft/models/musicgen.py deleted file mode 100644 index 007dd9e0ed1cfd359fb4889e7f4108248e189941..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/MusicGen/audiocraft/models/musicgen.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Main model for using MusicGen. This will combine all the required components -and provide easy access to the generation API. -""" - -import os -import typing as tp - -import torch - -from .encodec import CompressionModel -from .lm import LMModel -from .builders import get_debug_compression_model, get_debug_lm_model -from .loaders import load_compression_model, load_lm_model, HF_MODEL_CHECKPOINTS_MAP -from ..data.audio_utils import convert_audio -from ..modules.conditioners import ConditioningAttributes, WavCondition -from ..utils.autocast import TorchAutocast - - -MelodyList = tp.List[tp.Optional[torch.Tensor]] -MelodyType = tp.Union[torch.Tensor, MelodyList] - - -class MusicGen: - """MusicGen main model with convenient generation API. - - Args: - name (str): name of the model. - compression_model (CompressionModel): Compression model - used to map audio to invertible discrete representations. - lm (LMModel): Language model over discrete representations. 
- """ - def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel, - max_duration: float = 30): - self.name = name - self.compression_model = compression_model - self.lm = lm - self.max_duration = max_duration - self.device = next(iter(lm.parameters())).device - self.generation_params: dict = {} - self.set_generation_params(duration=15) # 15 seconds by default - self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None - if self.device.type == 'cpu': - self.autocast = TorchAutocast(enabled=False) - else: - self.autocast = TorchAutocast( - enabled=True, device_type=self.device.type, dtype=torch.float16) - - @property - def frame_rate(self) -> int: - """Roughly the number of AR steps per seconds.""" - return self.compression_model.frame_rate - - @property - def sample_rate(self) -> int: - """Sample rate of the generated audio.""" - return self.compression_model.sample_rate - - @property - def audio_channels(self) -> int: - """Audio channels of the generated audio.""" - return self.compression_model.channels - - @staticmethod - def get_pretrained(name: str = 'melody', device=None): - """Return pretrained model, we provide four models: - - small (300M), text to music, # see: https://huggingface.co/facebook/musicgen-small - - medium (1.5B), text to music, # see: https://huggingface.co/facebook/musicgen-medium - - melody (1.5B) text to music and text+melody to music, # see: https://huggingface.co/facebook/musicgen-melody - - large (3.3B), text to music, # see: https://huggingface.co/facebook/musicgen-large - """ - - if device is None: - if torch.cuda.device_count(): - device = 'cuda' - else: - device = 'cpu' - - if name == 'debug': - # used only for unit tests - compression_model = get_debug_compression_model(device) - lm = get_debug_lm_model(device) - return MusicGen(name, compression_model, lm) - - if name not in HF_MODEL_CHECKPOINTS_MAP: - if not os.path.isfile(name) and not os.path.isdir(name): - raise ValueError( - f"{name} is not a valid checkpoint name. " - f"Choose one of {', '.join(HF_MODEL_CHECKPOINTS_MAP.keys())}" - ) - - cache_dir = os.environ.get('MUSICGEN_ROOT', None) - compression_model = load_compression_model(name, device=device, cache_dir=cache_dir) - lm = load_lm_model(name, device=device, cache_dir=cache_dir) - if name == 'melody': - lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True - - return MusicGen(name, compression_model, lm) - - def set_generation_params(self, use_sampling: bool = True, top_k: int = 250, - top_p: float = 0.0, temperature: float = 1.0, - duration: float = 30.0, cfg_coef: float = 3.0, - two_step_cfg: bool = False, extend_stride: float = 18): - """Set the generation parameters for MusicGen. - - Args: - use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True. - top_k (int, optional): top_k used for sampling. Defaults to 250. - top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0. - temperature (float, optional): Softmax temperature parameter. Defaults to 1.0. - duration (float, optional): Duration of the generated waveform. Defaults to 30.0. - cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0. - two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance, - instead of batching together the two. This has some impact on how things - are padded but seems to have little impact in practice. - extend_stride: when doing extended generation (i.e. 
more than 30 seconds), by how much - should we extend the audio each time. Larger values will mean less context is - preserved, and shorter value will require extra computations. - """ - assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration." - self.extend_stride = extend_stride - self.duration = duration - self.generation_params = { - 'use_sampling': use_sampling, - 'temp': temperature, - 'top_k': top_k, - 'top_p': top_p, - 'cfg_coef': cfg_coef, - 'two_step_cfg': two_step_cfg, - } - - def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None): - """Override the default progress callback.""" - self._progress_callback = progress_callback - - def generate_unconditional(self, num_samples: int, progress: bool = False) -> torch.Tensor: - """Generate samples in an unconditional manner. - - Args: - num_samples (int): Number of samples to be generated. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - descriptions: tp.List[tp.Optional[str]] = [None] * num_samples - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate(self, descriptions: tp.List[str], progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on text. - - Args: - descriptions (tp.List[str]): A list of strings used as text conditioning. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) - assert prompt_tokens is None - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType, - melody_sample_rate: int, progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on text and melody. - - Args: - descriptions (tp.List[str]): A list of strings used as text conditioning. - melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as - melody conditioning. Should have shape [B, C, T] with B matching the description length, - C=1 or 2. It can be [C, T] if there is a single description. It can also be - a list of [C, T] tensors. - melody_sample_rate: (int): Sample rate of the melody waveforms. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - if isinstance(melody_wavs, torch.Tensor): - if melody_wavs.dim() == 2: - melody_wavs = melody_wavs[None] - if melody_wavs.dim() != 3: - raise ValueError("Melody wavs should have a shape [B, C, T].") - melody_wavs = list(melody_wavs) - else: - for melody in melody_wavs: - if melody is not None: - assert melody.dim() == 2, "One melody in the list has the wrong number of dims." - - melody_wavs = [ - convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels) - if wav is not None else None - for wav in melody_wavs] - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, - melody_wavs=melody_wavs) - assert prompt_tokens is None - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int, - descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None, - progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on audio prompts. 
- - Args: - prompt (torch.Tensor): A batch of waveforms used for continuation. - Prompt should be [B, C, T], or [C, T] if only one sample is generated. - prompt_sample_rate (int): Sampling rate of the given audio waveforms. - descriptions (tp.List[str], optional): A list of strings used as text conditioning. Defaults to None. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - if prompt.dim() == 2: - prompt = prompt[None] - if prompt.dim() != 3: - raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).") - prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels) - if descriptions is None: - descriptions = [None] * len(prompt) - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt) - assert prompt_tokens is not None - return self._generate_tokens(attributes, prompt_tokens, progress) - - @torch.no_grad() - def _prepare_tokens_and_attributes( - self, - descriptions: tp.Sequence[tp.Optional[str]], - prompt: tp.Optional[torch.Tensor], - melody_wavs: tp.Optional[MelodyList] = None, - ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]: - """Prepare model inputs. - - Args: - descriptions (tp.List[str]): A list of strings used as text conditioning. - prompt (torch.Tensor): A batch of waveforms used for continuation. - melody_wavs (tp.Optional[torch.Tensor], optional): A batch of waveforms - used as melody conditioning. Defaults to None. - """ - attributes = [ - ConditioningAttributes(text={'description': description}) - for description in descriptions] - - if melody_wavs is None: - for attr in attributes: - attr.wav['self_wav'] = WavCondition( - torch.zeros((1, 1), device=self.device), - torch.tensor([0], device=self.device), - path='null_wav') # type: ignore - else: - if self.name != "melody": - raise RuntimeError("This model doesn't support melody conditioning. " - "Use the `melody` model.") - assert len(melody_wavs) == len(descriptions), \ - f"number of melody wavs must match number of descriptions! " \ - f"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}" - for attr, melody in zip(attributes, melody_wavs): - if melody is None: - attr.wav['self_wav'] = WavCondition( - torch.zeros((1, 1), device=self.device), - torch.tensor([0], device=self.device), - path='null_wav') # type: ignore - else: - attr.wav['self_wav'] = WavCondition( - melody.to(device=self.device), - torch.tensor([melody.shape[-1]], device=self.device)) - - if prompt is not None: - if descriptions is not None: - assert len(descriptions) == len(prompt), "Prompt and nb. descriptions doesn't match" - prompt = prompt.to(self.device) - prompt_tokens, scale = self.compression_model.encode(prompt) - assert scale is None - else: - prompt_tokens = None - return attributes, prompt_tokens - - def _generate_tokens(self, attributes: tp.List[ConditioningAttributes], - prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor: - """Generate discrete audio tokens given audio prompt and/or conditions. - - Args: - attributes (tp.List[ConditioningAttributes]): Conditions used for generation (text/melody). - prompt_tokens (tp.Optional[torch.Tensor]): Audio prompt used for continuation. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - Returns: - torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params. 
- """ - total_gen_len = int(self.duration * self.frame_rate) - max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate) - current_gen_offset: int = 0 - - def _progress_callback(generated_tokens: int, tokens_to_generate: int): - generated_tokens += current_gen_offset - if self._progress_callback is not None: - # Note that total_gen_len might be quite wrong depending on the - # codebook pattern used, but with delay it is almost accurate. - self._progress_callback(generated_tokens, total_gen_len) - else: - print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r') - - if prompt_tokens is not None: - assert max_prompt_len >= prompt_tokens.shape[-1], \ - "Prompt is longer than audio to generate" - - callback = None - if progress: - callback = _progress_callback - - if self.duration <= self.max_duration: - # generate by sampling from LM, simple case. - with self.autocast: - gen_tokens = self.lm.generate( - prompt_tokens, attributes, - callback=callback, max_gen_len=total_gen_len, **self.generation_params) - - else: - # now this gets a bit messier, we need to handle prompts, - # melody conditioning etc. - ref_wavs = [attr.wav['self_wav'] for attr in attributes] - all_tokens = [] - if prompt_tokens is None: - prompt_length = 0 - else: - all_tokens.append(prompt_tokens) - prompt_length = prompt_tokens.shape[-1] - - stride_tokens = int(self.frame_rate * self.extend_stride) - - while current_gen_offset + prompt_length < total_gen_len: - time_offset = current_gen_offset / self.frame_rate - chunk_duration = min(self.duration - time_offset, self.max_duration) - max_gen_len = int(chunk_duration * self.frame_rate) - for attr, ref_wav in zip(attributes, ref_wavs): - wav_length = ref_wav.length.item() - if wav_length == 0: - continue - # We will extend the wav periodically if it not long enough. - # we have to do it here rather than in conditioners.py as otherwise - # we wouldn't have the full wav. 
- initial_position = int(time_offset * self.sample_rate) - wav_target_length = int(self.max_duration * self.sample_rate) - print(initial_position / self.sample_rate, wav_target_length / self.sample_rate) - positions = torch.arange(initial_position, - initial_position + wav_target_length, device=self.device) - attr.wav['self_wav'] = WavCondition( - ref_wav[0][:, positions % wav_length], - torch.full_like(ref_wav[1], wav_target_length)) - with self.autocast: - gen_tokens = self.lm.generate( - prompt_tokens, attributes, - callback=callback, max_gen_len=max_gen_len, **self.generation_params) - if prompt_tokens is None: - all_tokens.append(gen_tokens) - else: - all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:]) - prompt_tokens = gen_tokens[:, :, stride_tokens:] - prompt_length = prompt_tokens.shape[-1] - current_gen_offset += stride_tokens - - gen_tokens = torch.cat(all_tokens, dim=-1) - - # generate audio - assert gen_tokens.dim() == 3 - with torch.no_grad(): - gen_audio = self.compression_model.decode(gen_tokens, None) - return gen_audio diff --git a/spaces/DeepLabCut/MegaDetector_DeepLabCut/DLC_models/models.py b/spaces/DeepLabCut/MegaDetector_DeepLabCut/DLC_models/models.py deleted file mode 100644 index 856f92ce0f45a6c6cd762329e78d0ec4cc9d5967..0000000000000000000000000000000000000000 --- a/spaces/DeepLabCut/MegaDetector_DeepLabCut/DLC_models/models.py +++ /dev/null @@ -1,60 +0,0 @@ -import urllib.request -import tarfile -from tqdm import tqdm -import os -import yaml -from ruamel.yaml import YAML - -def read_plainconfig(configname): - if not os.path.exists(configname): - raise FileNotFoundError( - f"Config {configname} is not found. Please make sure that the file exists." - ) - with open(configname) as file: - return YAML().load(file) - -def DownloadModel(modelname, target_dir): - """ - Downloads a DeepLabCut Model Zoo Project - """ - - def show_progress(count, block_size, total_size): - pbar.update(block_size) - - def tarfilenamecutting(tarf): - """' auxfun to extract folder path - ie. /xyz-trainsetxyshufflez/ - """ - for memberid, member in enumerate(tarf.getmembers()): - if memberid == 0: - parent = str(member.path) - l = len(parent) + 1 - if member.path.startswith(parent): - member.path = member.path[l:] - yield member - - neturls = read_plainconfig("./model/pretrained_model_urls.yaml") #FIXME - - if modelname in neturls.keys(): - url = neturls[modelname] - print(url) - response = urllib.request.urlopen(url) - print( - "Downloading the model from the DeepLabCut server @Harvard -> Go Crimson!!! 
{}....".format( - url - ) - ) - total_size = int(response.getheader("Content-Length")) - pbar = tqdm(unit="B", total=total_size, position=0) - filename, _ = urllib.request.urlretrieve(url, reporthook=show_progress) - with tarfile.open(filename, mode="r:gz") as tar: - tar.extractall(target_dir, members=tarfilenamecutting(tar)) - else: - models = [ - fn - for fn in neturls.keys() - if "resnet_" not in fn and "mobilenet_" not in fn - ] - print("Model does not exist: ", modelname) - print("Pick one of the following: ", models) - return target_dir diff --git a/spaces/DollieHell/pisa/README.md b/spaces/DollieHell/pisa/README.md deleted file mode 100644 index 6c2e773640880f55c2104b578f17de9696f20d16..0000000000000000000000000000000000000000 --- a/spaces/DollieHell/pisa/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Pisa -emoji: 🌍 -colorFrom: indigo -colorTo: purple -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DragGan/DragGan-Inversion/training/__init__.py b/spaces/DragGan/DragGan-Inversion/training/__init__.py deleted file mode 100644 index 939e7c6c8f94c4ea1141885c3c3295fe083b06aa..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/training/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -# empty diff --git a/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/datasets/psg.py b/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/datasets/psg.py deleted file mode 100644 index 052dcd787578900f875b7f9d43729a188a4d2aca..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/datasets/psg.py +++ /dev/null @@ -1,93 +0,0 @@ -# dataset settings -dataset_type = 'PanopticSceneGraphDataset' -ann_file = './data/psg/psg.json' -coco_root = 'data/coco' - -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadPanopticSceneGraphAnnotations', - with_bbox=True, - with_rel=True, - with_mask=True, - with_seg=True, - ), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='SegRescale', scale_factor=1 / 4), - dict(type='SceneGraphFormatBundle'), - dict( - type='Collect', - keys=[ - 'img', - 'gt_bboxes', - 'gt_labels', - 'gt_rels', - 'gt_relmaps', - 'gt_masks', - 'gt_semantic_seg', - ], - ), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - # Since the forward process may need gt info, annos must be loaded. - dict(type='LoadPanopticSceneGraphAnnotations', - with_bbox=True, - with_rel=True), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - # NOTE: Do not change the img to DC. 
- dict(type='ImageToTensor', keys=['img']), - dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']), - dict( - type='ToDataContainer', - fields=(dict(key='gt_bboxes'), dict(key='gt_labels')), - ), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), - ], - ), -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=ann_file, - img_prefix=coco_root, - seg_prefix=coco_root, - pipeline=train_pipeline, - split='train', - all_bboxes=True, - ), - val=dict( - type=dataset_type, - ann_file=ann_file, - img_prefix=coco_root, - seg_prefix=coco_root, - pipeline=test_pipeline, - split='test', - all_bboxes=True, - ), - test=dict( - type=dataset_type, - ann_file=ann_file, - img_prefix=coco_root, - seg_prefix=coco_root, - pipeline=test_pipeline, - split='test', - all_bboxes=True, - ), -) diff --git a/spaces/ElricOon/EYE2/main.py b/spaces/ElricOon/EYE2/main.py deleted file mode 100644 index 0d4e70821af29256cd35cb6452d3462c2489c306..0000000000000000000000000000000000000000 --- a/spaces/ElricOon/EYE2/main.py +++ /dev/null @@ -1,152 +0,0 @@ -import streamlit as st -import tensorflow as tf -from PIL import Image -import numpy as np -import cv2 -from sklearn.cluster import KMeans -from io import BytesIO - - -CLASS_NAMES = ['Cataracts', 'Diabetic Retinopathy', 'Glaucoma', 'Normal'] - - -def main(): - st.title('EYE DISEASE CLASSIFICATION') - - with st.expander("Instruction"): - st.markdown("## Description") - st.markdown(""" - - This is an eye disease classification system that allows users to upload retinal images and perform predictions. - - The system can predict the following classes:

      - Cataracts, - Diabetic Retinopathy, - Glaucoma, - Normal.

      - """, unsafe_allow_html=True) - - st.markdown("## Steps") - st.markdown(""" - 1. Click the "Browse files" button to upload a retinal image. - 2. Click the "Predict Eye Disease" button to start the eye disease prediction. - 3. Please wait for a few moments to get the result. - 4. The result will be displayed below the image. -
      - """, unsafe_allow_html=True) - - st.markdown("## Confidence") - st.markdown(""" - - Confidence is determined by the probabilities obtained from the softmax layer of the deep learning model. - - The softmax layer converts the final output of the model into probability scores, reflecting the model's confidence in its predictions for each class in the multiclass classification. - """) - - - - - model = load_model() - image = load_image() - result = st.button('PREDICT EYE DISEASE') - - - - if result: - - result_placeholder = st.empty() - result_placeholder.write('Calculating results...') - - preprocessed_image = preprocess_image(image) - predicted_class, rounded_percentage = predict(model, CLASS_NAMES, preprocessed_image) - - result_placeholder.empty() - - if predicted_class and rounded_percentage: - st.markdown(f'

      Predicted Class: {predicted_class}

      ', unsafe_allow_html=True) - st.markdown(f'

      Confidence: {rounded_percentage}%

      ', unsafe_allow_html=True) - - - - - - - -def load_model(): - model = tf.keras.models.load_model('model/VGG19/model.epoch06-loss0.34.h5') - return model - - -def load_image(): - uploaded_file = st.file_uploader(label='Pick an retinal image to test') - if uploaded_file is not None: - image_data = uploaded_file.getvalue() - - # Load image with BytesIO - temp = Image.open(BytesIO(image_data)) - - # Resizing the image - resized_image = temp.resize((224, 224)) - - # Display image into streamlit - st.image(resized_image) - - # Convert PIL image into array - image = np.array(resized_image) - - return image - else: - return None - - -def preprocess_image(image): - # Histogram Equalization Part - - # Split into 3 channels - red, green, blue = cv2.split(image) - - # Apply Histogram Equalization in RED - equalization = cv2.equalizeHist(red) - - # Merge back the channels - merged_image = cv2.merge((equalization, green, blue)) - - - # Image Segmentation Part - segmentation = merged_image.reshape(-1, 3) - - # Declare K-means clustering image segmentation - kmeans = KMeans(n_clusters=20, n_init=5) - - # Perform segmentation for the images - kmeans.fit(segmentation) - - segmented_images = kmeans.cluster_centers_[kmeans.labels_] - segmented_images = segmented_images.reshape(merged_image.shape) - # st.image(segmented_images.astype("uint8")) - - return segmented_images - - - -def predict(model, class_names, image): - # Normalize the image - normalized_image = image / 255.0 - - input_image = np.expand_dims(normalized_image, axis=0) - - predictions = model.predict(input_image) - print(predictions) - - predicted_class = class_names[np.argmax(predictions)] - accuracy = np.max(predictions) - accuracy_percentage = accuracy * 100 - rounded_percentage = round(accuracy_percentage, 2) - print(rounded_percentage) - - - return predicted_class, rounded_percentage - - - - - - -if __name__ == '__main__': - main() diff --git a/spaces/Enigma007/Medika/README.md b/spaces/Enigma007/Medika/README.md deleted file mode 100644 index 3e6b5efef57cac816d694c58ca83c3008b8c6dca..0000000000000000000000000000000000000000 --- a/spaces/Enigma007/Medika/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Medika Chats -emoji: 💻 -colorFrom: gray -colorTo: indigo -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/EsoCode/text-generation-webui/docs/System-requirements.md b/spaces/EsoCode/text-generation-webui/docs/System-requirements.md deleted file mode 100644 index 3a88416d34ad7c8babd90a81db902e95288a8197..0000000000000000000000000000000000000000 --- a/spaces/EsoCode/text-generation-webui/docs/System-requirements.md +++ /dev/null @@ -1,42 +0,0 @@ -These are the VRAM and RAM requirements (in MiB) to run some examples of models **in 16-bit (default) precision**: - -| model | VRAM (GPU) | RAM | -|:-----------------------|-------------:|--------:| -| arxiv_ai_gpt2 | 1512.37 | 5824.2 | -| blenderbot-1B-distill | 2441.75 | 4425.91 | -| opt-1.3b | 2509.61 | 4427.79 | -| gpt-neo-1.3b | 2605.27 | 5851.58 | -| opt-2.7b | 5058.05 | 4863.95 | -| gpt4chan_model_float16 | 11653.7 | 4437.71 | -| gpt-j-6B | 11653.7 | 5633.79 | -| galactica-6.7b | 12697.9 | 4429.89 | -| opt-6.7b | 12700 | 4368.66 | -| bloomz-7b1-p3 | 13483.1 | 4470.34 | - -#### GPU mode with 8-bit precision - -Allows you to load models that would not normally fit into your GPU. 
Enabled by default for 13b and 20b models in this web UI. - -| model | VRAM (GPU) | RAM | -|:---------------|-------------:|--------:| -| opt-13b | 12528.1 | 1152.39 | -| gpt-neox-20b | 20384 | 2291.7 | - -#### CPU mode (32-bit precision) - -A lot slower, but does not require a GPU. - -On my i5-12400F, 6B models take around 10-20 seconds to respond in chat mode, and around 5 minutes to generate a 200 tokens completion. - -| model | RAM | -|:-----------------------|---------:| -| arxiv_ai_gpt2 | 4430.82 | -| gpt-neo-1.3b | 6089.31 | -| opt-1.3b | 8411.12 | -| blenderbot-1B-distill | 8508.16 | -| opt-2.7b | 14969.3 | -| bloomz-7b1-p3 | 21371.2 | -| gpt-j-6B | 24200.3 | -| gpt4chan_model | 24246.3 | -| galactica-6.7b | 26561.4 | -| opt-6.7b | 29596.6 | diff --git a/spaces/EuroPython2022/mmocr-demo/configs/_base_/recog_models/crnn_tps.py b/spaces/EuroPython2022/mmocr-demo/configs/_base_/recog_models/crnn_tps.py deleted file mode 100644 index 9719eb3c521cee55beee1711a73bd29a07d10366..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/_base_/recog_models/crnn_tps.py +++ /dev/null @@ -1,18 +0,0 @@ -# model -label_convertor = dict( - type='CTCConvertor', dict_type='DICT36', with_unknown=False, lower=True) - -model = dict( - type='CRNNNet', - preprocessor=dict( - type='TPSPreprocessor', - num_fiducial=20, - img_size=(32, 100), - rectified_img_size=(32, 100), - num_img_channel=1), - backbone=dict(type='VeryDeepVgg', leaky_relu=False, input_channels=1), - encoder=None, - decoder=dict(type='CRNNDecoder', in_channels=512, rnn_flag=True), - loss=dict(type='CTCLoss'), - label_convertor=label_convertor, - pretrained=None) diff --git "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" "b/spaces/Fengbinbin/gpt-academic/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" deleted file mode 100644 index 6a7d118b4439605db6e10b9a416a2e725b99a672..0000000000000000000000000000000000000000 --- "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" +++ /dev/null @@ -1,102 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping -import requests -from bs4 import BeautifulSoup -from request_llm.bridge_all import model_info - -def google(query, proxies): - query = query # 在此处替换您要搜索的关键词 - url = f"https://www.google.com/search?q={query}" - headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'} - response = requests.get(url, headers=headers, proxies=proxies) - soup = BeautifulSoup(response.content, 'html.parser') - results = [] - for g in soup.find_all('div', class_='g'): - anchors = g.find_all('a') - if anchors: - link = anchors[0]['href'] - if link.startswith('/url?q='): - link = link[7:] - if not link.startswith('http'): - continue - title = g.find('h3').text - item = {'title': title, 'link': link} - results.append(item) - - for r in results: - print(r['link']) - return results - -def scrape_text(url, proxies) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36', - 'Content-Type': 'text/plain', - } - try: - response = requests.get(url, headers=headers, proxies=proxies, 
timeout=8) - if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding - except: - return "无法连接到该网页" - soup = BeautifulSoup(response.text, "html.parser") - for script in soup(["script", "style"]): - script.extract() - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - return text - -@CatchException -def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append((f"请结合互联网信息回答以下问题:{txt}", - "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该模板可以实现ChatGPT联网信息综合。该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板。您若希望分享新的功能模组,请不吝PR!")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # ------------- < 第1步:爬取搜索引擎的结果 > ------------- - from toolbox import get_conf - proxies, = get_conf('proxies') - urls = google(txt, proxies) - history = [] - - # ------------- < 第2步:依次访问网页 > ------------- - max_search_result = 5 # 最多收纳多少个网页的结果 - for index, url in enumerate(urls[:max_search_result]): - res = scrape_text(url['link'], proxies) - history.extend([f"第{index}份搜索结果:", res]) - chatbot.append([f"第{index}份搜索结果:", res[:500]+"......"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # ------------- < 第3步:ChatGPT综合 > ------------- - i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}" - i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token - inputs=i_say, - history=history, - max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']*3//4 - ) - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。" - ) - chatbot[-1] = (i_say, gpt_say) - history.append(i_say);history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - diff --git a/spaces/FlippFuzz/whisper-webui/src/vadParallel.py b/spaces/FlippFuzz/whisper-webui/src/vadParallel.py deleted file mode 100644 index c2323c0b632c34014ac1fe7ac79141b5bd9c5731..0000000000000000000000000000000000000000 --- a/spaces/FlippFuzz/whisper-webui/src/vadParallel.py +++ /dev/null @@ -1,298 +0,0 @@ -import multiprocessing -from queue import Empty -import threading -import time -from src.hooks.progressListener import ProgressListener -from src.vad import AbstractTranscription, TranscriptionConfig, get_audio_duration - -from multiprocessing import Pool, Queue - -from typing import Any, Dict, List, Union -import os - -from src.whisper.abstractWhisperContainer import AbstractWhisperCallback - -class _ProgressListenerToQueue(ProgressListener): - def __init__(self, progress_queue: Queue): - self.progress_queue = progress_queue - self.progress_total = 0 - self.prev_progress = 0 - - def on_progress(self, current: Union[int, float], total: Union[int, float]): - delta = current - self.prev_progress - self.prev_progress = current - self.progress_total = total - self.progress_queue.put(delta) - - def on_finished(self): - if self.progress_total > self.prev_progress: - delta = self.progress_total - self.prev_progress - self.progress_queue.put(delta) 
- self.prev_progress = self.progress_total - -class ParallelContext: - def __init__(self, num_processes: int = None, auto_cleanup_timeout_seconds: float = None): - self.num_processes = num_processes - self.auto_cleanup_timeout_seconds = auto_cleanup_timeout_seconds - self.lock = threading.Lock() - - self.ref_count = 0 - self.pool = None - self.cleanup_timer = None - - def get_pool(self): - # Initialize pool lazily - if (self.pool is None): - context = multiprocessing.get_context('spawn') - self.pool = context.Pool(self.num_processes) - - self.ref_count = self.ref_count + 1 - - if (self.auto_cleanup_timeout_seconds is not None): - self._stop_auto_cleanup() - - return self.pool - - def return_pool(self, pool): - if (self.pool == pool and self.ref_count > 0): - self.ref_count = self.ref_count - 1 - - if (self.ref_count == 0): - if (self.auto_cleanup_timeout_seconds is not None): - self._start_auto_cleanup() - - def _start_auto_cleanup(self): - if (self.cleanup_timer is not None): - self.cleanup_timer.cancel() - self.cleanup_timer = threading.Timer(self.auto_cleanup_timeout_seconds, self._execute_cleanup) - self.cleanup_timer.start() - - print("Started auto cleanup of pool in " + str(self.auto_cleanup_timeout_seconds) + " seconds") - - def _stop_auto_cleanup(self): - if (self.cleanup_timer is not None): - self.cleanup_timer.cancel() - self.cleanup_timer = None - - print("Stopped auto cleanup of pool") - - def _execute_cleanup(self): - print("Executing cleanup of pool") - - if (self.ref_count == 0): - self.close() - - def close(self): - self._stop_auto_cleanup() - - if (self.pool is not None): - print("Closing pool of " + str(self.num_processes) + " processes") - self.pool.close() - self.pool.join() - self.pool = None - -class ParallelTranscriptionConfig(TranscriptionConfig): - def __init__(self, device_id: str, override_timestamps, initial_segment_index, copy: TranscriptionConfig = None): - super().__init__(copy.non_speech_strategy, copy.segment_padding_left, copy.segment_padding_right, copy.max_silent_period, copy.max_merge_size, copy.max_prompt_window, initial_segment_index) - self.device_id = device_id - self.override_timestamps = override_timestamps - -class ParallelTranscription(AbstractTranscription): - # Silero VAD typically takes about 3 seconds per minute, so there's no need to split the chunks - # into smaller segments than 2 minute (min 6 seconds per CPU core) - MIN_CPU_CHUNK_SIZE_SECONDS = 2 * 60 - - def __init__(self, sampling_rate: int = 16000): - super().__init__(sampling_rate=sampling_rate) - - def transcribe_parallel(self, transcription: AbstractTranscription, audio: str, whisperCallable: AbstractWhisperCallback, config: TranscriptionConfig, - cpu_device_count: int, gpu_devices: List[str], cpu_parallel_context: ParallelContext = None, gpu_parallel_context: ParallelContext = None, - progress_listener: ProgressListener = None): - total_duration = get_audio_duration(audio) - - # First, get the timestamps for the original audio - if (cpu_device_count > 1 and not transcription.is_transcribe_timestamps_fast()): - merged = self._get_merged_timestamps_parallel(transcription, audio, config, total_duration, cpu_device_count, cpu_parallel_context) - else: - timestamp_segments = transcription.get_transcribe_timestamps(audio, config, 0, total_duration) - merged = transcription.get_merged_timestamps(timestamp_segments, config, total_duration) - - # We must make sure the whisper model is downloaded - if (len(gpu_devices) > 1): - whisperCallable.model_container.ensure_downloaded() - - # Split 
into a list for each device - # TODO: Split by time instead of by number of chunks - merged_split = list(self._split(merged, len(gpu_devices))) - - # Parameters that will be passed to the transcribe function - parameters = [] - segment_index = config.initial_segment_index - - processing_manager = multiprocessing.Manager() - progress_queue = processing_manager.Queue() - - for i in range(len(gpu_devices)): - # Note that device_segment_list can be empty. But we will still create a process for it, - # as otherwise we run the risk of assigning the same device to multiple processes. - device_segment_list = list(merged_split[i]) if i < len(merged_split) else [] - device_id = gpu_devices[i] - - print("Device " + str(device_id) + " (index " + str(i) + ") has " + str(len(device_segment_list)) + " segments") - - # Create a new config with the given device ID - device_config = ParallelTranscriptionConfig(device_id, device_segment_list, segment_index, config) - segment_index += len(device_segment_list) - - progress_listener_to_queue = _ProgressListenerToQueue(progress_queue) - parameters.append([audio, whisperCallable, device_config, progress_listener_to_queue]); - - merged = { - 'text': '', - 'segments': [], - 'language': None - } - - created_context = False - - perf_start_gpu = time.perf_counter() - - # Spawn a separate process for each device - try: - if (gpu_parallel_context is None): - gpu_parallel_context = ParallelContext(len(gpu_devices)) - created_context = True - - # Get a pool of processes - pool = gpu_parallel_context.get_pool() - - # Run the transcription in parallel - results_async = pool.starmap_async(self.transcribe, parameters) - total_progress = 0 - - while not results_async.ready(): - try: - delta = progress_queue.get(timeout=5) # Set a timeout of 5 seconds - except Empty: - continue - - total_progress += delta - if progress_listener is not None: - progress_listener.on_progress(total_progress, total_duration) - - results = results_async.get() - - # Call the finished callback - if progress_listener is not None: - progress_listener.on_finished() - - for result in results: - # Merge the results - if (result['text'] is not None): - merged['text'] += result['text'] - if (result['segments'] is not None): - merged['segments'].extend(result['segments']) - if (result['language'] is not None): - merged['language'] = result['language'] - - finally: - # Return the pool to the context - if (gpu_parallel_context is not None): - gpu_parallel_context.return_pool(pool) - # Always close the context if we created it - if (created_context): - gpu_parallel_context.close() - - perf_end_gpu = time.perf_counter() - print("Parallel transcription took " + str(perf_end_gpu - perf_start_gpu) + " seconds") - - return merged - - def _get_merged_timestamps_parallel(self, transcription: AbstractTranscription, audio: str, config: TranscriptionConfig, total_duration: float, - cpu_device_count: int, cpu_parallel_context: ParallelContext = None): - parameters = [] - - chunk_size = max(total_duration / cpu_device_count, self.MIN_CPU_CHUNK_SIZE_SECONDS) - chunk_start = 0 - cpu_device_id = 0 - - perf_start_time = time.perf_counter() - - # Create chunks that will be processed on the CPU - while (chunk_start < total_duration): - chunk_end = min(chunk_start + chunk_size, total_duration) - - if (chunk_end - chunk_start < 1): - # No need to process chunks that are less than 1 second - break - - print("Parallel VAD: Executing chunk from " + str(chunk_start) + " to " + - str(chunk_end) + " on CPU device " + str(cpu_device_id)) - 
parameters.append([audio, config, chunk_start, chunk_end]); - - cpu_device_id += 1 - chunk_start = chunk_end - - created_context = False - - # Spawn a separate process for each device - try: - if (cpu_parallel_context is None): - cpu_parallel_context = ParallelContext(cpu_device_count) - created_context = True - - # Get a pool of processes - pool = cpu_parallel_context.get_pool() - - # Run the transcription in parallel. Note that transcription must be picklable. - results = pool.starmap(transcription.get_transcribe_timestamps, parameters) - - timestamps = [] - - # Flatten the results - for result in results: - timestamps.extend(result) - - merged = transcription.get_merged_timestamps(timestamps, config, total_duration) - - perf_end_time = time.perf_counter() - print("Parallel VAD processing took {} seconds".format(perf_end_time - perf_start_time)) - return merged - - finally: - # Return the pool to the context - if (cpu_parallel_context is not None): - cpu_parallel_context.return_pool(pool) - # Always close the context if we created it - if (created_context): - cpu_parallel_context.close() - - def get_transcribe_timestamps(self, audio: str, config: ParallelTranscriptionConfig, start_time: float, duration: float): - return [] - - def get_merged_timestamps(self, timestamps: List[Dict[str, Any]], config: ParallelTranscriptionConfig, total_duration: float): - # Override timestamps that will be processed - if (config.override_timestamps is not None): - print("(get_merged_timestamps) Using override timestamps of size " + str(len(config.override_timestamps))) - return config.override_timestamps - return super().get_merged_timestamps(timestamps, config, total_duration) - - def transcribe(self, audio: str, whisperCallable: AbstractWhisperCallback, config: ParallelTranscriptionConfig, - progressListener: ProgressListener = None): - # Override device ID the first time - if (os.environ.get("INITIALIZED", None) is None): - os.environ["INITIALIZED"] = "1" - - # Note that this may be None if the user didn't specify a device. In that case, Whisper will - # just use the default GPU device. 
- if (config.device_id is not None): - print("Using device " + config.device_id) - os.environ["CUDA_VISIBLE_DEVICES"] = config.device_id - - return super().transcribe(audio, whisperCallable, config, progressListener) - - def _split(self, a, n): - """Split a list into n approximately equal parts.""" - k, m = divmod(len(a), n) - return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)) - diff --git a/spaces/FoxMeo/fire-detector/app.py b/spaces/FoxMeo/fire-detector/app.py deleted file mode 100644 index a99269340350a0631f7a35f6aa40c8b75fc0cc92..0000000000000000000000000000000000000000 --- a/spaces/FoxMeo/fire-detector/app.py +++ /dev/null @@ -1,189 +0,0 @@ -import gradio as gr -import os - -import argparse -import time -from pathlib import Path - -import cv2 -import torch -import torch.backends.cudnn as cudnn -from numpy import random - -from models.experimental import attempt_load -from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ - scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path -from utils.plots import plot_one_box -from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel -from PIL import Image - -from huggingface_hub import hf_hub_download - - -def detect(img, conf, iou): - parser = argparse.ArgumentParser() - #parser.add_argument('--weights', nargs='+', type=str, default="best.pt", help='model.pt path(s)') - parser.add_argument('--source', type=str, default='Inference/', help='source') # file/folder, 0 for webcam - parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') - #parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') - #parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='display results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') - parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default='runs/detect', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--trace', action='store_true', help='trace model') - opt = parser.parse_args() - img.save("Inference/test.jpg") - source, weights, view_img, save_txt, imgsz, trace = opt.source, "best.pt", opt.view_img, opt.save_txt, opt.img_size, opt.trace - save_img = True # save inference images - webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( - ('rtsp://', 'rtmp://', 'http://', 'https://')) - - # Directories - save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Initialize - set_logging() - device = select_device(opt.device) - half = False # half precision only supported on CUDA - - # Load model - model = attempt_load(weights, map_location=device) # load FP32 model - stride = int(model.stride.max()) # model stride - imgsz = check_img_size(imgsz, s=stride) # check img_size - - if trace: - model = TracedModel(model, device, opt.img_size) - - if half: - model.half() # to FP16 - - # Second-stage classifier - classify = False - if classify: - modelc = load_classifier(name='resnet101', n=2) # initialize - modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() - - # Set Dataloader - vid_path, vid_writer = None, None - if webcam: - view_img = check_imshow() - cudnn.benchmark = True # set True to speed up constant image size inference - dataset = LoadStreams(source, img_size=imgsz, stride=stride) - else: - dataset = LoadImages(source, img_size=imgsz, stride=stride) - - # Get names and colors - names = model.module.names if hasattr(model, 'module') else model.names - colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] - - # Run inference - if device.type != 'cpu': - model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once - t0 = time.time() - for path, img, im0s, vid_cap in dataset: - img = torch.from_numpy(img).to(device) - img = img.half() if half else img.float() # uint8 to fp16/32 - img /= 255.0 # 0 - 255 to 0.0 - 1.0 - if img.ndimension() == 3: - img = img.unsqueeze(0) - - # Inference - t1 = time_synchronized() - pred = model(img, augment=opt.augment)[0] - - # Apply NMS - pred = non_max_suppression(pred, conf, iou, classes=opt.classes, agnostic=opt.agnostic_nms) - t2 = time_synchronized() - - # Apply Classifier - if classify: - pred = apply_classifier(pred, modelc, img, im0s) - - # 
Process detections - for i, det in enumerate(pred): # detections per image - if webcam: # batch_size >= 1 - p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count - else: - p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) - - p = Path(p) # to Path - save_path = str(save_dir / p.name) # img.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt - s += '%gx%g ' % img.shape[2:] # print string - gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh - if len(det): - # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() - - # Print results - for c in det[:, -1].unique(): - n = (det[:, -1] == c).sum() # detections per class - s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string - - # Write results - for *xyxy, conf, cls in reversed(det): - if save_txt: # Write to file - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format - with open(txt_path + '.txt', 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - if save_img or view_img: # Add bbox to image - label = f'{names[int(cls)]} {conf:.2f}' - plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) - - # Print time (inference + NMS) - #print(f'{s}Done. ({t2 - t1:.3f}s)') - - # Stream results - if view_img: - cv2.imshow(str(p), im0) - cv2.waitKey(1) # 1 millisecond - - # Save results (image with detections) - if save_img: - if dataset.mode == 'image': - cv2.imwrite(save_path, im0) - else: # 'video' or 'stream' - if vid_path != save_path: # new video - vid_path = save_path - if isinstance(vid_writer, cv2.VideoWriter): - vid_writer.release() # release previous video writer - if vid_cap: # video - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - else: # stream - fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path += '.mp4' - vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer.write(im0) - - if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - #print(f"Results saved to {save_dir}{s}") - - print(f'Done. ({time.time() - t0:.3f}s)') - - return Image.fromarray(im0[:,:,::-1]) - - -gr.Interface(detect, - [gr.Image(type="pil"), - gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"), - gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold")], - gr.Image(type="pil"), - title="Yolov7 Fire Detector", - examples = [['fire1.jpg', 0.5, 0.5],['fire-smoke.jpg',0.45,0.5]] - ).launch() diff --git a/spaces/FrankZxShen/so-vits-svc-models-pcr/cluster/kmeans.py b/spaces/FrankZxShen/so-vits-svc-models-pcr/cluster/kmeans.py deleted file mode 100644 index 6111ea45e66a15d41b5b904be6f75affd3c4369f..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/so-vits-svc-models-pcr/cluster/kmeans.py +++ /dev/null @@ -1,201 +0,0 @@ -import math,pdb -import torch,pynvml -from torch.nn.functional import normalize -from time import time -import numpy as np -# device=torch.device("cuda:0") -def _kpp(data: torch.Tensor, k: int, sample_size: int = -1): - """ Picks k points in the data based on the kmeans++ method. 
- - Parameters - ---------- - data : torch.Tensor - Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D - data, rank 2 multidimensional data, in which case one - row is one observation. - k : int - Number of samples to generate. - sample_size : int - sample data to avoid memory overflow during calculation - - Returns - ------- - init : ndarray - A 'k' by 'N' containing the initial centroids. - - References - ---------- - .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of - careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium - on Discrete Algorithms, 2007. - .. [2] scipy/cluster/vq.py: _kpp - """ - batch_size=data.shape[0] - if batch_size>sample_size: - data = data[torch.randint(0, batch_size,[sample_size], device=data.device)] - dims = data.shape[1] if len(data.shape) > 1 else 1 - init = torch.zeros((k, dims)).to(data.device) - r = torch.distributions.uniform.Uniform(0, 1) - for i in range(k): - if i == 0: - init[i, :] = data[torch.randint(data.shape[0], [1])] - else: - D2 = torch.cdist(init[:i, :][None, :], data[None, :], p=2)[0].amin(dim=0) - probs = D2 / torch.sum(D2) - cumprobs = torch.cumsum(probs, dim=0) - init[i, :] = data[torch.searchsorted(cumprobs, r.sample([1]).to(data.device))] - return init -class KMeansGPU: - ''' - Kmeans clustering algorithm implemented with PyTorch - - Parameters: - n_clusters: int, - Number of clusters - - max_iter: int, default: 100 - Maximum number of iterations - - tol: float, default: 0.0001 - Tolerance - - verbose: int, default: 0 - Verbosity - - mode: {'euclidean', 'cosine'}, default: 'euclidean' - Type of distance measure - - init_method: {'random', 'point', '++'} - Type of initialization - - minibatch: {None, int}, default: None - Batch size of MinibatchKmeans algorithm - if None perform full KMeans algorithm - - Attributes: - centroids: torch.Tensor, shape: [n_clusters, n_features] - cluster centroids - ''' - def __init__(self, n_clusters, max_iter=200, tol=1e-4, verbose=0, mode="euclidean",device=torch.device("cuda:0")): - self.n_clusters = n_clusters - self.max_iter = max_iter - self.tol = tol - self.verbose = verbose - self.mode = mode - self.device=device - pynvml.nvmlInit() - gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(device.index) - info = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle) - self.minibatch=int(33e6/self.n_clusters*info.free/ 1024 / 1024 / 1024) - print("free_mem/GB:",info.free/ 1024 / 1024 / 1024,"minibatch:",self.minibatch) - - @staticmethod - def cos_sim(a, b): - """ - Compute cosine similarity of 2 sets of vectors - - Parameters: - a: torch.Tensor, shape: [m, n_features] - - b: torch.Tensor, shape: [n, n_features] - """ - return normalize(a, dim=-1) @ normalize(b, dim=-1).transpose(-2, -1) - - @staticmethod - def euc_sim(a, b): - """ - Compute euclidean similarity of 2 sets of vectors - Parameters: - a: torch.Tensor, shape: [m, n_features] - b: torch.Tensor, shape: [n, n_features] - """ - return 2 * a @ b.transpose(-2, -1) -(a**2).sum(dim=1)[..., :, None] - (b**2).sum(dim=1)[..., None, :] - - def max_sim(self, a, b): - """ - Compute maximum similarity (or minimum distance) of each vector - in a with all of the vectors in b - Parameters: - a: torch.Tensor, shape: [m, n_features] - b: torch.Tensor, shape: [n, n_features] - """ - if self.mode == 'cosine': - sim_func = self.cos_sim - elif self.mode == 'euclidean': - sim_func = self.euc_sim - sim = sim_func(a, b) - max_sim_v, max_sim_i = sim.max(dim=-1) - return max_sim_v, max_sim_i - - def fit_predict(self, X): - """ - Combination 
of fit() and predict() methods. - This is faster than calling fit() and predict() seperately. - Parameters: - X: torch.Tensor, shape: [n_samples, n_features] - centroids: {torch.Tensor, None}, default: None - if given, centroids will be initialized with given tensor - if None, centroids will be randomly chosen from X - Return: - labels: torch.Tensor, shape: [n_samples] - - mini_=33kk/k*remain - mini=min(mini_,fea_shape) - offset=log2(k/1000)*1.5 - kpp_all=min(mini_*10/offset,fea_shape) - kpp_sample=min(mini_/12/offset,fea_shape) - """ - assert isinstance(X, torch.Tensor), "input must be torch.Tensor" - assert X.dtype in [torch.half, torch.float, torch.double], "input must be floating point" - assert X.ndim == 2, "input must be a 2d tensor with shape: [n_samples, n_features] " - # print("verbose:%s"%self.verbose) - - offset = np.power(1.5,np.log(self.n_clusters / 1000))/np.log(2) - with torch.no_grad(): - batch_size= X.shape[0] - # print(self.minibatch, int(self.minibatch * 10 / offset), batch_size) - start_time = time() - if (self.minibatch*10//offset< batch_size): - x = X[torch.randint(0, batch_size,[int(self.minibatch*10/offset)])].to(self.device) - else: - x = X.to(self.device) - # print(x.device) - self.centroids = _kpp(x, self.n_clusters, min(int(self.minibatch/12/offset),batch_size)) - del x - torch.cuda.empty_cache() - # self.centroids = self.centroids.to(self.device) - num_points_in_clusters = torch.ones(self.n_clusters, device=self.device, dtype=X.dtype)#全1 - closest = None#[3098036]#int64 - if(self.minibatch>=batch_size//2 and self.minibatch=batch_size): - X=X.to(self.device) - for i in range(self.max_iter): - iter_time = time() - if self.minibatch= 2: - print('iter:', i, 'error:', error.item(), 'time spent:', round(time()-iter_time, 4)) - if error <= self.tol: - break - - if self.verbose >= 1: - print(f'used {i+1} iterations ({round(time()-start_time, 4)}s) to cluster {batch_size} items into {self.n_clusters} clusters') - return closest diff --git a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/download.py b/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/download.py deleted file mode 100644 index 83c4a76d6e6a1bae9068127933a77c94812131af..0000000000000000000000000000000000000000 --- a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/download.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -from functools import lru_cache -from typing import Dict, Optional - -import requests -import torch as th -from filelock import FileLock -from tqdm.auto import tqdm - -MODEL_PATHS = { - # "base": "https://huggingface.co/datasets/asifhugs/weights/blob/main/base.pt", - # "upsample": "https://huggingface.co/datasets/asifhugs/weights/blob/main/upsample.pt", - "base-inpaint": "https://openaipublic.blob.core.windows.net/diffusion/dec-2021/base_inpaint.pt", - "upsample-inpaint": "https://openaipublic.blob.core.windows.net/diffusion/dec-2021/upsample_inpaint.pt", - "clip/image-enc": "https://openaipublic.blob.core.windows.net/diffusion/dec-2021/clip_image_enc.pt", - "clip/text-enc": "https://openaipublic.blob.core.windows.net/diffusion/dec-2021/clip_text_enc.pt", -} - - -@lru_cache() -def default_cache_dir() -> str: - return os.path.join(os.path.abspath(os.getcwd()), "glide_model_cache") - - -def fetch_file_cached( - url: str, progress: bool = True, cache_dir: Optional[str] = None, chunk_size: int = 4096 -) -> str: - """ - Download the file at the given URL into a local file and return the path. 
- - If cache_dir is specified, it will be used to download the files. - Otherwise, default_cache_dir() is used. - """ - if cache_dir is None: - cache_dir = default_cache_dir() - os.makedirs(cache_dir, exist_ok=True) - response = requests.get(url, stream=True) - size = int(response.headers.get("content-length", "0")) - local_path = os.path.join(cache_dir, url.split("/")[-1]) - with FileLock(local_path + ".lock"): - if os.path.exists(local_path): - return local_path - if progress: - pbar = tqdm(total=size, unit="iB", unit_scale=True) - tmp_path = local_path + ".tmp" - with open(tmp_path, "wb") as f: - for chunk in response.iter_content(chunk_size): - if progress: - pbar.update(len(chunk)) - f.write(chunk) - os.rename(tmp_path, local_path) - if progress: - pbar.close() - return local_path - - -def load_checkpoint( - checkpoint_name: str, - device: th.device, - progress: bool = True, - cache_dir: Optional[str] = None, - chunk_size: int = 4096, -) -> Dict[str, th.Tensor]: - if checkpoint_name not in MODEL_PATHS: - raise ValueError( - f"Unknown checkpoint name {checkpoint_name}. Known names are: {MODEL_PATHS.keys()}." - ) - path = fetch_file_cached( - MODEL_PATHS[checkpoint_name], progress=progress, cache_dir=cache_dir, chunk_size=chunk_size - ) - return th.load(path, map_location=device) diff --git a/spaces/Giuliano/breast_cancer_prediction_tfjs/README.md b/spaces/Giuliano/breast_cancer_prediction_tfjs/README.md deleted file mode 100644 index 035b677275a22c9350a4c59cd99ab1a0e15cfc82..0000000000000000000000000000000000000000 --- a/spaces/Giuliano/breast_cancer_prediction_tfjs/README.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Breast_cancer_prediction_tfjs -emoji: 📚 -colorFrom: indigo -colorTo: red -sdk: static -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/sep_fcn_head.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/sep_fcn_head.py deleted file mode 100644 index a636f702e72d12fd6ff2821fd10923f682f805f1..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/sep_fcn_head.py +++ /dev/null @@ -1,51 +0,0 @@ -from mmcv.cnn import DepthwiseSeparableConvModule - -from ..builder import HEADS -from .fcn_head import FCNHead - - -@HEADS.register_module() -class DepthwiseSeparableFCNHead(FCNHead): - """Depthwise-Separable Fully Convolutional Network for Semantic - Segmentation. - - This head is implemented according to Fast-SCNN paper. - Args: - in_channels(int): Number of output channels of FFM. - channels(int): Number of middle-stage channels in the decode head. 
- concat_input(bool): Whether to concatenate original decode input into - the result of several consecutive convolution layers. - Default: True. - num_classes(int): Used to determine the dimension of - final prediction tensor. - in_index(int): Correspond with 'out_indices' in FastSCNN backbone. - norm_cfg (dict | None): Config of norm layers. - align_corners (bool): align_corners argument of F.interpolate. - Default: False. - loss_decode(dict): Config of loss type and some - relevant additional options. - """ - - def __init__(self, **kwargs): - super(DepthwiseSeparableFCNHead, self).__init__(**kwargs) - self.convs[0] = DepthwiseSeparableConvModule( - self.in_channels, - self.channels, - kernel_size=self.kernel_size, - padding=self.kernel_size // 2, - norm_cfg=self.norm_cfg) - for i in range(1, self.num_convs): - self.convs[i] = DepthwiseSeparableConvModule( - self.channels, - self.channels, - kernel_size=self.kernel_size, - padding=self.kernel_size // 2, - norm_cfg=self.norm_cfg) - - if self.concat_input: - self.conv_cat = DepthwiseSeparableConvModule( - self.in_channels + self.channels, - self.channels, - kernel_size=self.kernel_size, - padding=self.kernel_size // 2, - norm_cfg=self.norm_cfg) diff --git a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/lpips/__init__.py b/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/lpips/__init__.py deleted file mode 100644 index a4f86b7ee229b333a64f16d0091e988492f99c58..0000000000000000000000000000000000000000 --- a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/lpips/__init__.py +++ /dev/null @@ -1,160 +0,0 @@ - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -from skimage.measure import compare_ssim -import torch -from torch.autograd import Variable - -from lpips import dist_model - -class PerceptualLoss(torch.nn.Module): - def __init__(self, model='net-lin', net='alex', colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0]): # VGG using our perceptually-learned weights (LPIPS metric) - # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss - super(PerceptualLoss, self).__init__() - print('Setting up Perceptual loss...') - self.use_gpu = use_gpu - self.spatial = spatial - self.gpu_ids = gpu_ids - self.model = dist_model.DistModel() - self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, spatial=self.spatial, gpu_ids=gpu_ids) - print('...[%s] initialized'%self.model.name()) - print('...Done') - - def forward(self, pred, target, normalize=False): - """ - Pred and target are Variables. - If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1] - If normalize is False, assumes the images are already between [-1,+1] - - Inputs pred and target are Nx3xHxW - Output pytorch Variable N long - """ - - if normalize: - target = 2 * target - 1 - pred = 2 * pred - 1 - - return self.model.forward(target, pred) - -def normalize_tensor(in_feat,eps=1e-10): - norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True)) - return in_feat/(norm_factor+eps) - -def l2(p0, p1, range=255.): - return .5*np.mean((p0 / range - p1 / range)**2) - -def psnr(p0, p1, peak=255.): - return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2)) - -def dssim(p0, p1, range=255.): - return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2. 
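# --- Editorial usage sketch (not part of the original file) ---------------
# How the PerceptualLoss module defined above is typically driven: inputs are
# NCHW image batches, and with normalize=True they are expected in [0, 1] and
# are rescaled to [-1, 1] internally before comparison. Constructing the loss
# may download pretrained backbone/linear weights. For example:
#
#     loss_fn = PerceptualLoss(model='net-lin', net='alex', use_gpu=False)
#     pred, target = torch.rand(1, 3, 64, 64), torch.rand(1, 3, 64, 64)
#     distance = loss_fn(pred, target, normalize=True)  # one value per image
# ---------------------------------------------------------------------------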
- -def rgb2lab(in_img,mean_cent=False): - from skimage import color - img_lab = color.rgb2lab(in_img) - if(mean_cent): - img_lab[:,:,0] = img_lab[:,:,0]-50 - return img_lab - -def tensor2np(tensor_obj): - # change dimension of a tensor object into a numpy array - return tensor_obj[0].cpu().float().numpy().transpose((1,2,0)) - -def np2tensor(np_obj): - # change dimenion of np array into tensor array - return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - -def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False): - # image tensor to lab tensor - from skimage import color - - img = tensor2im(image_tensor) - img_lab = color.rgb2lab(img) - if(mc_only): - img_lab[:,:,0] = img_lab[:,:,0]-50 - if(to_norm and not mc_only): - img_lab[:,:,0] = img_lab[:,:,0]-50 - img_lab = img_lab/100. - - return np2tensor(img_lab) - -def tensorlab2tensor(lab_tensor,return_inbnd=False): - from skimage import color - import warnings - warnings.filterwarnings("ignore") - - lab = tensor2np(lab_tensor)*100. - lab[:,:,0] = lab[:,:,0]+50 - - rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1) - if(return_inbnd): - # convert back to lab, see if we match - lab_back = color.rgb2lab(rgb_back.astype('uint8')) - mask = 1.*np.isclose(lab_back,lab,atol=2.) - mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis]) - return (im2tensor(rgb_back),mask) - else: - return im2tensor(rgb_back) - -def rgb2lab(input): - from skimage import color - return color.rgb2lab(input / 255.) - -def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.): - image_numpy = image_tensor[0].cpu().float().numpy() - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor - return image_numpy.astype(imtype) - -def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.): - return torch.Tensor((image / factor - cent) - [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - -def tensor2vec(vector_tensor): - return vector_tensor.data.cpu().numpy()[:, :, 0, 0] - -def voc_ap(rec, prec, use_07_metric=False): - """ ap = voc_ap(rec, prec, [use_07_metric]) - Compute VOC AP given precision and recall. - If use_07_metric is true, uses the - VOC 07 11 point method (default:False). - """ - if use_07_metric: - # 11 point metric - ap = 0. - for t in np.arange(0., 1.1, 0.1): - if np.sum(rec >= t) == 0: - p = 0 - else: - p = np.max(prec[rec >= t]) - ap = ap + p / 11. 
- else: - # correct AP calculation - # first append sentinel values at the end - mrec = np.concatenate(([0.], rec, [1.])) - mpre = np.concatenate(([0.], prec, [0.])) - - # compute the precision envelope - for i in range(mpre.size - 1, 0, -1): - mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) - - # to calculate area under PR curve, look for points - # where X axis (recall) changes value - i = np.where(mrec[1:] != mrec[:-1])[0] - - # and sum (\Delta recall) * prec - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) - return ap - -def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.): -# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.): - image_numpy = image_tensor[0].cpu().float().numpy() - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor - return image_numpy.astype(imtype) - -def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.): -# def im2tensor(image, imtype=np.uint8, cent=1., factor=1.): - return torch.Tensor((image / factor - cent) - [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/multilingual/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/multilingual/__init__.py deleted file mode 100644 index 6264236915a7269a4d920ee8213004374dd86a9a..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/multilingual/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/inference/num_to_word_on_sent.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/inference/num_to_word_on_sent.py deleted file mode 100644 index ce878a8c3ee6f5ef629abeaee418d5959f7179ed..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/inference/num_to_word_on_sent.py +++ /dev/null @@ -1,1314 +0,0 @@ -import re -import string - -# ----------------------------- indic_num.py ----------------------------- -supported_lang = {"en", "hi", "gu", "mr", "bn", "te", "ta", "kn", "or", "pa"} -# supported_lang = {'eng', 'hin', 'guj', 'mar', 'ben', 'tel', 'tam', 'kan', 'ori', 'pan'} # Three alphabet lang code - -all_num = { - "en": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], - "hi": ["०", "१", "२", "३", "४", "५", "६", "७", "८", "९"], - "gu": ["૦", "૧", "૨", "૩", "૪", "૫", "૬", "૭", "૮", "૯"], - "mr": ["०", "१", "२", "३", "४", "५", "६", "७", "८", "९"], - "bn": ["০", "১", "২", "৩", "৪", "৫", "৬", "৭", "৮", "৯"], - "te": ["౦", "౧", "౨", "౩", "౪", "౫", "౬", "౭", "౮", "౯"], - "ta": ["0", "௧", "௨", "௩", "௪", "௫", "௬", "௭", "௮", "௯", "௰"], - "kn": ["೦", "೧", "೨", "೩", "೪", "೫", "೬", "೭", "೮", "೯"], - "or": ["୦", "୧", "୨", "୩", "୪", "୫", "୬", "୭", "୮", "୯"], - "pa": ["੦", "੧", "੨", "੩", "੪", "੫", "੬", "੭", "੮", "੯"], -} - -num_dict = dict() -num_dict["en"] = { - "0": "zero", - "1": "one", - "2": "two", - "3": "three", - "4": "four", - "5": "five", - "6": "six", - "7": "seven", - "8": "eight", - "9": "nine", - "10": "ten", - "11": "eleven", - "12": "twelve", - "13": "thirteen", - "14": "fourteen", - "15": "fifteen", - "16": "sixteen", - "17": "seventeen", - "18": "eighteen", - "19": "nineteen", - "20": "twenty", - "21": "twenty-one", - "22": "twenty-two", - "23": "twenty-three", - "24": "twenty-four", - "25": "twenty-five", - "26": "twenty-six", - "27": 
"twenty-seven", - "28": "twenty-eight", - "29": "twenty-nine", - "30": "thirty", - "31": "thirty-one", - "32": "thirty-two", - "33": "thirty-three", - "34": "thirty-four", - "35": "thirty-five", - "36": "thirty-six", - "37": "thirty-seven", - "38": "thirty-eight", - "39": "thirty-nine", - "40": "forty", - "41": "forty-one", - "42": "forty-two", - "43": "forty-three", - "44": "forty-four", - "45": "forty-five", - "46": "forty-six", - "47": "forty-seven", - "48": "forty-eight", - "49": "forty-nine", - "50": "fifty", - "51": "fifty-one", - "52": "fifty-two", - "53": "fifty-three", - "54": "fifty-four", - "55": "fifty-five", - "56": "fifty-six", - "57": "fifty-seven", - "58": "fifty-eight", - "59": "fifty-nine", - "60": "sixty", - "61": "sixty-one", - "62": "sixty-two", - "63": "sixty-three", - "64": "sixty-four", - "65": "sixty-five", - "66": "sixty-six", - "67": "sixty-seven", - "68": "sixty-eight", - "69": "sixty-nine", - "70": "seventy", - "71": "seventy-one", - "72": "seventy-two", - "73": "seventy-three", - "74": "seventy-four", - "75": "seventy-five", - "76": "seventy-six", - "77": "seventy-seven", - "78": "seventy-eight", - "79": "seventy-nine", - "80": "eighty", - "81": "eighty-one", - "82": "eighty-two", - "83": "eighty-three", - "84": "eighty-four", - "85": "eighty-five", - "86": "eighty-six", - "87": "eighty-seven", - "88": "eighty-eight", - "89": "eighty-nine", - "90": "ninety", - "91": "ninety-one", - "92": "ninety-two", - "93": "ninety-three", - "94": "ninety-four", - "95": "ninety-five", - "96": "ninety-six", - "97": "ninety-seven", - "98": "ninety-eight", - "99": "ninety-nine", - "100": "hundred", - "1000": "thousand", - "100000": "lac", - "10000000": "crore", - "1000000000": "arab", -} # English-India -num_dict["hi"] = { - "0": "शून्य", - "1": "एक", - "2": "दो", - "3": "तीन", - "4": "चार", - "5": "पाँच", - "6": "छः", - "7": "सात", - "8": "आठ", - "9": "नौ", - "10": "दस", - "11": "ग्यारह", - "12": "बारह", - "13": "तेरह", - "14": "चौदह", - "15": "पंद्रह", - "16": "सोलह", - "17": "सत्रह", - "18": "अट्ठारह", - "19": "उन्नीस", - "20": "बीस", - "21": "इक्कीस", - "22": "बाईस", - "23": "तेईस", - "24": "चौबिस", - "25": "पच्चीस", - "26": "छब्बीस", - "27": "सत्ताईस", - "28": "अट्ठाईस", - "29": "उनतीस", - "30": "तीस", - "31": "इकतीस", - "32": "बत्तीस", - "33": "तैंतीस", - "34": "चौंतीस", - "35": "पैंतीस", - "36": "छत्तीस", - "37": "सैंतीस", - "38": "अड़तीस", - "39": "उनतालीस", - "40": "चालीस", - "41": "इकतालीस", - "42": "बयालीस", - "43": "तैंतालीस", - "44": "चौंतालीस", - "45": "पैंतालीस", - "46": "छियालीस", - "47": "सैंतालीस", - "48": "अड़तालीस", - "49": "उनचास", - "50": "पचास", - "51": "इक्यावन​", - "52": "बावन", - "53": "तिरेपन", - "54": "चौवन", - "55": "पचपन", - "56": "छप्पन", - "57": "सत्तावन", - "58": "अट्ठावन", - "59": "उनसठ", - "60": "साठ", - "61": "इकसठ", - "62": "बासठ", - "63": "तिरेसठ", - "64": "चौंसठ", - "65": "पैंसठ", - "66": "छयासठ", - "67": "सरसठ​", - "68": "अड़सठ", - "69": "उनहत्तर", - "70": "सत्तर", - "71": "इकहत्तर", - "72": "बहत्तर", - "73": "तिहत्तर", - "74": "चौहत्तर", - "75": "पचहत्तर", - "76": "छिहत्तर", - "77": "सतहत्तर", - "78": "अठहत्तर", - "79": "उन्यासी", - "80": "अस्सी", - "81": "इक्यासी", - "82": "बयासी", - "83": "तिरासी", - "84": "चौरासी", - "85": "पचासी", - "86": "छियासी", - "87": "सत्तासी", - "88": "अठासी", - "89": "नवासी", - "90": "नब्बे", - "91": "इक्यानवे", - "92": "बानवे", - "93": "तिरानवे", - "94": "चौरानवे", - "95": "पचानवे", - "96": "छियानवे", - "97": "सत्तानवे", - "98": "अट्ठानवे", - "99": "निन्यानवे", - "100": "सौ", - "1000": "हज़ार", - "100000": 
"लाख", - "10000000": "करोड़", - "1000000000": "अरब", -} # Hindi -num_dict["gu"] = { - "0": "શૂન્ય", - "1": "એક", - "2": "બે", - "3": "ત્રણ", - "4": "ચાર", - "5": "પાંચ", - "6": "છ", - "7": "સાત", - "8": "આઠ", - "9": "નવ", - "10": "દસ", - "11": "અગિયાર", - "12": "બાર", - "13": "તેર", - "14": "ચૌદ", - "15": "પંદર", - "16": "સોળ", - "17": "સત્તર", - "18": "અઢાર", - "19": "ઓગણિસ", - "20": "વીસ", - "21": "એકવીસ", - "22": "બાવીસ", - "23": "તેવીસ", - "24": "ચોવીસ", - "25": "પચ્ચીસ", - "26": "છવીસ", - "27": "સત્તાવીસ", - "28": "અઠ્ઠાવીસ", - "29": "ઓગણત્રીસ", - "30": "ત્રીસ", - "31": "એકત્રીસ", - "32": "બત્રીસ", - "33": "તેત્રીસ", - "34": "ચોત્રીસ", - "35": "પાંત્રીસ", - "36": "છત્રીસ", - "37": "સડત્રીસ", - "38": "અડત્રીસ", - "39": "ઓગણચાલીસ", - "40": "ચાલીસ", - "41": "એકતાલીસ", - "42": "બેતાલીસ", - "43": "ત્રેતાલીસ", - "44": "ચુંમાલીસ", - "45": "પિસ્તાલીસ", - "46": "છેતાલીસ", - "47": "સુડતાલીસ", - "48": "અડતાલીસ", - "49": "ઓગણપચાસ", - "50": "પચાસ", - "51": "એકાવન", - "52": "બાવન", - "53": "ત્રેપન", - "54": "ચોપન", - "55": "પંચાવન", - "56": "છપ્પન", - "57": "સત્તાવન", - "58": "અઠ્ઠાવન", - "59": "ઓગણસાઠ", - "60": "સાઈઠ", - "61": "એકસઠ", - "62": "બાસઠ", - "63": "ત્રેસઠ", - "64": "ચોસઠ", - "65": "પાંસઠ", - "66": "છાસઠ", - "67": "સડસઠ", - "68": "અડસઠ", - "69": "અગણોસિત્તેર", - "70": "સિત્તેર", - "71": "એકોતેર", - "72": "બોતેર", - "73": "તોતેર", - "74": "ચુમોતેર", - "75": "પંચોતેર", - "76": "છોતેર", - "77": "સિત્યોતેર", - "78": "ઇઠ્યોતેર", - "79": "ઓગણાએંસી", - "80": "એંસી", - "81": "એક્યાસી", - "82": "બ્યાસી", - "83": "ત્યાસી", - "84": "ચોર્યાસી", - "85": "પંચાસી", - "86": "છ્યાસી", - "87": "સિત્યાસી", - "88": "ઈઠ્યાસી", - "89": "નેવ્યાસી", - "90": "નેવું", - "91": "એકાણું", - "92": "બાણું", - "93": "ત્રાણું", - "94": "ચોરાણું", - "95": "પંચાણું", - "96": "છન્નું", - "97": "સત્તાણું", - "98": "અઠ્ઠાણું", - "99": "નવ્વાણું", - "100": "સો", - "1000": "હજાર", - "100000": "લાખ", - "1000000": "દસ લાખ", - "10000000": "કરોડ઼", -} # Gujarati -num_dict["mr"] = { - "0": "शून्य", - "1": "एक", - "2": "दोन", - "3": "तीन", - "4": "चार", - "5": "पाच", - "6": "सहा", - "7": "सात", - "8": "आठ", - "9": "नऊ", - "10": "दहा", - "11": "अकरा", - "12": "बारा", - "13": "तेरा", - "14": "चौदा", - "15": "पंधरा", - "16": "सोळा", - "17": "सतरा", - "18": "अठरा", - "19": "एकोणीस", - "20": "वीस", - "21": "एकवीस", - "22": "बावीस", - "23": "तेवीस", - "24": "चोवीस", - "25": "पंचवीस", - "26": "सव्वीस", - "27": "सत्तावीस", - "28": "अठ्ठावीस", - "29": "एकोणतीस", - "30": "तीस", - "31": "एकतीस", - "32": "बत्तीस", - "33": "तेहेतीस", - "34": "चौतीस", - "35": "पस्तीस", - "36": "छत्तीस", - "37": "सदतीस", - "38": "अडतीस", - "39": "एकोणचाळीस", - "40": "चाळीस", - "41": "एक्केचाळीस", - "42": "बेचाळीस", - "43": "त्रेचाळीस", - "44": "चव्वेचाळीस", - "45": "पंचेचाळीस", - "46": "सेहेचाळीस", - "47": "सत्तेचाळीस", - "48": "अठ्ठेचाळीस", - "49": "एकोणपन्नास", - "50": "पन्नास", - "51": "एक्कावन्न", - "52": "बावन्न", - "53": "त्रेपन्न", - "54": "चोपन्न", - "55": "पंचावन्न", - "56": "छप्पन्न", - "57": "सत्तावन्न", - "58": "अठ्ठावन्न", - "59": "एकोणसाठ", - "60": "साठ", - "61": "एकसष्ठ", - "62": "बासष्ठ", - "63": "त्रेसष्ठ", - "64": "चौसष्ठ", - "65": "पासष्ठ", - "66": "सहासष्ठ", - "67": "सदुसष्ठ", - "68": "अडुसष्ठ", - "69": "एकोणसत्तर", - "70": "सत्तर", - "71": "एक्काहत्तर", - "72": "बाहत्तर", - "73": "त्र्याहत्तर", - "74": "चौर्‍याहत्तर", - "75": "पंच्याहत्तर", - "76": "शहात्तर", - "77": "सत्याहत्तर", - "78": "अठ्ठ्याहत्तर", - "79": "एकोण ऐंशी", - "80": "ऐंशी", - "81": "एक्क्याऐंशी", - "82": "ब्याऐंशी", - "83": "त्र्याऐंशी", - "84": "चौऱ्याऐंशी", - "85": 
"पंच्याऐंशी", - "86": "शहाऐंशी", - "87": "सत्त्याऐंशी", - "88": "अठ्ठ्याऐंशी", - "89": "एकोणनव्वद", - "90": "नव्वद", - "91": "एक्क्याण्णव", - "92": "ब्याण्णव", - "93": "त्र्याण्णव", - "94": "चौऱ्याण्णव", - "95": "पंच्याण्णव", - "96": "शहाण्णव", - "97": "सत्त्याण्णव", - "98": "अठ्ठ्याण्णव", - "99": "नव्व्याण्णव", - "100": "शे", - "1000": "हजार", - "100000": "लाख", - "10000000": "कोटी", - "1000000000": "अब्ज", -} # Marathi -num_dict["bn"] = { - "0": "শূন্য", - "1": "এক", - "2": "দুই", - "3": "তিন", - "4": "চার", - "5": "পাঁচ", - "6": "ছয়", - "7": "সাত", - "8": "আট", - "9": "নয়", - "10": "দশ", - "11": "এগার", - "12": "বার", - "13": "তের", - "14": "চৌদ্দ", - "15": "পনের", - "16": "ষোল", - "17": "সতের", - "18": "আঠার", - "19": "ঊনিশ", - "20": "বিশ", - "21": "একুশ", - "22": "বাইশ", - "23": "তেইশ", - "24": "চব্বিশ", - "25": "পঁচিশ", - "26": "ছাব্বিশ", - "27": "সাতাশ", - "28": "আঠাশ", - "29": "ঊনত্রিশ", - "30": "ত্রিশ", - "31": "একত্রিশ", - "32": "বত্রিশ", - "33": "তেত্রিশ", - "34": "চৌত্রিশ", - "35": "পঁয়ত্রিশ", - "36": "ছত্রিশ", - "37": "সাঁইত্রিশ", - "38": "আটত্রিশ", - "39": "ঊনচল্লিশ", - "40": "চল্লিশ", - "41": "একচল্লিশ", - "42": "বিয়াল্লিশ", - "43": "তেতাল্লিশ", - "44": "চুয়াল্লিশ", - "45": "পঁয়তাল্লিশ", - "46": "ছেচল্লিশ", - "47": "সাতচল্লিশ", - "48": "আটচল্লিশ", - "49": "ঊনপঞ্চাশ", - "50": "পঞ্চাশ", - "51": "একান্ন", - "52": "বায়ান্ন", - "53": "তিপ্পান্ন", - "54": "চুয়ান্ন", - "55": "পঞ্চান্ন", - "56": "ছাপ্পান্ন", - "57": "সাতান্ন", - "58": "আটান্ন", - "59": "ঊনষাট", - "60": "ষাট", - "61": "একষট্টি", - "62": "বাষট্টি", - "63": "তেষট্টি", - "64": "চৌষট্টি", - "65": "পঁয়ষট্টি", - "66": "ছেষট্টি", - "67": "সাতষট্টি", - "68": "আটষট্টি", - "69": "ঊনসত্তর", - "70": "সত্তর", - "71": "একাত্তর", - "72": "বাহাত্তর", - "73": "তিয়াত্তর", - "74": "চুয়াত্তর", - "75": "পঁচাত্তর", - "76": "ছিয়াত্তর", - "77": "সাতাত্তর", - "78": "আটাত্তর", - "79": "ঊনআশি", - "80": "আশি", - "81": "একাশি", - "82": "বিরাশি", - "83": "তিরাশি", - "84": "চুরাশি", - "85": "পঁচাশি", - "86": "ছিয়াশি", - "87": "সাতাশি", - "88": "আটাশি", - "89": "ঊননব্বই", - "90": "নব্বই", - "91": "একানব্বই", - "92": "বিরানব্বই", - "93": "তিরানব্বই", - "94": "চুরানব্বই", - "95": "পঁচানব্বই", - "96": "ছিয়ানব্বই", - "97": "সাতানব্বই", - "98": "আটানব্বই", - "99": "নিরানব্বই", - "100": "শো", - "1000": "হাজার", - "100000": "লাখ", - "10000000": "কোটি", - "1000000000": "একশ’ কোটি", -} # Bengali -num_dict["te"] = { - "0": "సున్నా", - "1": "ఒకటి", - "2": "రెండు", - "3": "మూడు", - "4": "నాలుగు", - "5": "ఐదు", - "6": "ఆరు", - "7": "ఏడు", - "8": "ఎనిమిది", - "9": "తొమ్మిది", - "10": "పది", - "11": "పదకొండు", - "12": "పన్నెండు", - "13": "పదమూడు", - "14": "పద్నాలుగు", - "15": "పదిహేను", - "16": "పదహారు", - "17": "పదిహేడు", - "18": "పద్దెనిమిది", - "19": "పందొమ్మిది", - "20": "ఇరవై", - "21": "ఇరవై ఒకటి", - "22": "ఇరవై రెండు", - "23": "ఇరవై మూడు", - "24": "ఇరవై నాలుగు", - "25": "ఇరవై ఐదు", - "26": "ఇరవై ఆరు", - "27": "ఇరవై ఏడు", - "28": "ఇరవై ఎనిమిది", - "29": "ఇరవై తొమ్మిది", - "30": "ముప్పై", - "31": "ముప్పై ఒకటి", - "32": "ముప్పై రెండు", - "33": "ముప్పై మూడు", - "34": "ముప్పై నాలుగు", - "35": "ముప్పై ఐదు", - "36": "ముప్పై ఆరు", - "37": "ముప్పై ఏడు", - "38": "ముప్పై ఎనిమిది", - "39": "ముప్పై తొమ్మిది", - "40": "నలభై", - "41": "నలభై ఒకటి", - "42": "నలభై రెండు", - "43": "నలభై మూడు", - "44": "నలభై నాలుగు", - "45": "నలభై ఐదు", - "46": "నలభై ఆరు", - "47": "నలభై ఏడు", - "48": "నలభై ఎనిమిది", - "49": "నలభై తొమ్మిది", - "50": "యాభై", - "51": "యాభై ఒకటి", - "52": "యాభై రెండు", - "53": "యాభై మూడు", - "54": "యాభై నాలుగు", - "55": "యాభై ఐదు", - "56": "యాభై 
ఆరు", - "57": "యాభై ఏడు", - "58": "యాభై ఎనిమిది", - "59": "యాభై తొమ్మిది", - "60": "అరవై", - "61": "అరవై ఒకటి", - "62": "అరవై రెండు", - "63": "అరవై మూడు", - "64": "అరవై నాలుగు", - "65": "అరవై ఐదు", - "66": "అరవై ఆరు", - "67": "అరవై ఏడు", - "68": "అరవై ఎనిమిది", - "69": "అరవై తొమ్మిది", - "70": "డెబ్బై", - "71": "డెబ్బై ఒకటి", - "72": "డెబ్బై రెండు", - "73": "డెబ్బై మూడు", - "74": "డెబ్బై నాలుగు", - "75": "డెబ్బై ఐదు", - "76": "డెబ్బై ఆరు", - "77": "డెబ్బై ఏడు", - "78": "డెబ్బై ఎనిమిది", - "79": "డెబ్బై తొమ్మిది", - "80": "ఎనభై", - "81": "ఎనభై ఒకటి", - "82": "ఎనభై రెండు", - "83": "ఎనభై మూడు", - "84": "ఎనభై నాలుగు", - "85": "ఎనభై ఐదు", - "86": "ఎనభై ఆరు", - "87": "ఎనభై ఏడు", - "88": "ఎనభై ఎనిమిది", - "89": "ఎనభై తొమ్మిది", - "90": "తొంభై", - "91": "తొంభై ఒకటి", - "92": "తొంభై రెండు", - "93": "తొంభై మూడు", - "94": "తొంభై నాలుగు", - "95": "తొంభై ఐదు", - "96": "తొంభై ఆరు", - "97": "తొంభై ఏడు", - "98": "తొంభై ఎనిమిది", - "99": "తొంభై తొమ్మిది", - "100": "వందల", - "1000": "వేల", - "100000": "లక్షల", - "10000000": "కోట్ల", - "1000000000": "బిలియన్", -} # Telugu -num_dict["ta"] = { - "0": "பூஜ்ஜியம்", - "1": "ஒன்று", - "2": "இரண்டு", - "3": "மூன்று", - "4": "நான்கு", - "5": "ஐந்து", - "6": "ஆறு", - "7": "ஏழு", - "8": "எட்டு", - "9": "ஒன்பது", - "10": "பத்து", - "11": "பதினொன்று", - "12": "பன்னிரண்டு", - "13": "பதிமூன்று", - "14": "பதினான்கு", - "15": "பதினைந்து", - "16": "பதினாறு", - "17": "பதினேழு", - "18": "பதினெட்டு", - "19": "பத்தொன்பது", - "20": "இருபது", - "21": "இருபது ஒன்று", - "22": "இருபத்து இரண்டு", - "23": "இருபத்து மூன்று", - "24": "இருபத்து நான்கு", - "25": "இருபத்து ஐந்து", - "26": "இருபத்து ஆறு", - "27": "இருபத்து ஏழு", - "28": "இருபத்து எட்டு", - "29": "இருபத்து ஒன்பது", - "30": "முப்பது", - "31": "முப்பத்து ஒன்று", - "32": "முப்பத்து இரண்டு", - "33": "முப்பத்து மூன்று", - "34": "முப்பத்து நான்கு", - "35": "முப்பத்து ஐந்து", - "36": "முப்பத்து ஆறு", - "37": "முப்பத்து ஏழு", - "38": "முப்பத்து எட்டு", - "39": "முப்பத்து ஒன்பது", - "40": "நாற்பது", - "41": "நாற்பத்து ஒன்று", - "42": "நாற்பத்து இரண்டு", - "43": "நாற்பத்து மூன்று", - "44": "நாற்பத்து நான்கு", - "45": "நாற்பத்து ஐந்து", - "46": "நாற்பத்து ஆறு", - "47": " நாற்பத்து ஏழு", - "48": "நாற்பத்து எட்டு", - "49": "நாற்பத்து ஒன்பது", - "50": "ஐம்பது", - "51": "ஐம்பத்து ஒன்று", - "52": "ஐம்பத்து இரண்டு", - "53": "ஐம்பத்து மூன்று", - "54": "ஐம்பத்து நான்கு", - "55": "ஐம்பத்து ஐந்து", - "56": "ஐம்பத்து ஆறு", - "57": "ஐம்பத்து ஏழு", - "58": "ஐம்பத்து எட்டு", - "59": "ஐம்பத்து ஒன்பது", - "60": "அறுபது", - "61": "அறுபத்து ஒன்று", - "62": "அறுபத்து இரண்டு", - "63": "அறுபத்து மூன்று", - "64": "அறுபத்து நான்கு", - "65": "அறுபத்து ஐந்து", - "66": "அறுபத்து ஆறு", - "67": "அறுபத்து ஏழு", - "68": "அறுபத்து எட்டு", - "69": "அறுபத்து ஒன்பது", - "70": "எழுபது", - "71": "எழுபத்தி ஒன்று", - "72": "எழுபத்தி இரண்டு", - "73": "எழுபத்தி முச்சக்கர", - "74": "எழுபத்தி நான்கு", - "75": "எழுபத்தி ஐந்து", - "76": "எழுபத்தி ஆறு", - "77": "எழுபத்தி ஏழு", - "78": "எழுபத்தி எட்டு", - "79": "எழுபத்தி ஒன்பது", - "80": "எண்பது", - "81": "எண்பத்தியொன்று", - "82": "எண்பத்திரண்டு", - "83": "எண்பத்திமூன்று", - "84": "என்பதினான்கு", - "85": "என்பதினைந்து", - "86": "எண்பத்திஆறு", - "87": "எண்பத்திஏழு", - "88": "எண்பத்தியெட்டு", - "89": "எண்பத்தியொன்பது", - "90": "தொன்னூறு", - "91": "தொண்ணூற்றியொன்று", - "92": "தொண்ணூற்றிரண்டு", - "93": "தொண்ணூற்றிமூன்று", - "94": "தொண்ணூற்றிநான்கு", - "95": "தொண்ணூற்றிஐந்து", - "96": "தொண்ணூற்றியாறு", - "97": "தொண்ணூற்றியேழு", - "98": "தொண்ணூற்றியெட்டு", - "99": "தொண்ணூற்றிஒன்பது", - "100": "நூறு", - "1000": "ஆயிரம்", - "100000": 
"இலட்சம்", - "10000000": "கோடி", - "1000000000": "பில்லியன்", -} # Tamil -num_dict["kn"] = { - "0": "ಸೊನ್ನೆ", - "1": "ಒಂದು", - "2": "ಎರಡು", - "3": "ಮೂರು", - "4": "ನಾಲ್ಕು", - "5": "ಅಯ್ದು", - "6": "ಆರು", - "7": "ಏಳು", - "8": "ಎಂಟು", - "9": "ಒಂಬತ್ತು", - "10": "ಹತ್ತು", - "11": "ಹನ್ನೊಂದು", - "12": "ಹನ್ನೆರಡು", - "13": "ಹದಿಮೂರು", - "14": "ಹದಿನಾಲ್ಕು", - "15": "ಹದಿನೈದು", - "16": "ಹದಿನಾರು", - "17": "ಹದಿನೇಳು", - "18": "ಹದಿನೆಂಟು", - "19": "ಹತ್ತೊಂಬತ್ತು", - "20": "ಇಪ್ಪತ್ತು", - "21": "ಇಪ್ಪತ್ತ್’ಒಂದು", - "22": "ಇಪ್ಪತ್ತ್’ಎರಡು", - "23": "ಇಪ್ಪತ್ತ್’ಮೂರು", - "24": "ಇಪ್ಪತ್ತ್’ನಾಲ್ಕು", - "25": "ಇಪ್ಪತ್ತ್’ಐದು", - "26": "ಇಪ್ಪತ್ತ್’ಆರು", - "27": "ಇಪ್ಪತ್ತ್’ಏಳು", - "28": "ಇಪ್ಪತ್ತ್’ಎಂಟು", - "29": "ಇಪ್ಪತ್ತ್’ಒಂಬತ್ತು", - "30": "ಮೂವತ್ತು", - "31": "ಮುವತ್ತ್’ಒಂದು", - "32": "ಮುವತ್ತ್’ಎರಡು", - "33": "ಮುವತ್ತ್’ಮೂರು", - "34": "ಮೂವತ್ತ್’ನಾಲ್ಕು", - "35": "ಮೂವತ್ತ್’ಐದು", - "36": "ಮೂವತ್ತ್’ಆರು", - "37": "ಮೂವತ್ತ್’ಏಳು", - "38": "ಮೂವತ್ತ್’ಎಂಟು", - "39": "ಮೂವತ್ತ್’ಒಂಬತ್ತು", - "40": "ನಲವತ್ತು", - "41": "ನಲವತ್ತೊಂದು", - "42": "ನಲವತ್ತ್ ಎರಡು", - "43": "ನಲವತ್ತ್ ಮೂರು", - "44": "ನಲವತ್ತ್ ನಾಲ್ಕು", - "45": "ನಲವತ್ತೈದು", - "46": "ನಲವತ್ತಾರು", - "47": "ನಲವತ್ತೇಳು", - "48": "ನಲವತ್ತೆಂಟು", - "49": "ನಲವತ್ತೊಂಬತ್ತು", - "50": "ಐವತ್ತು", - "51": "ಐವತ್ತೊಂದು", - "52": "ಐವತ್ತೆರಡು", - "53": "ಐವತ್ತಮೂರು", - "54": "ಐವತ್ತ್ನಾಲ್ಕು", - "55": "ಐವತ್ತೈದು", - "56": "ಐವತ್ತಾರು", - "57": "ಐವತ್ತೇಳು", - "58": "ಐವತ್ತೆಂಟು", - "59": "ಐವತ್ತೊಂಬತ್ತು", - "60": "ಅರವತ್ತು", - "61": "ಅರವತ್ತೊಂದು", - "62": "ಅರವತ್ತೆರಡು", - "63": "ಅರವತ್ತ್ ಮೂರು", - "64": "ಅರವತ್ತ್ ನಾಲ್ಕು", - "65": "ಅರವತ್ತೈದು", - "66": "ಅರವತ್ತಾರು", - "67": "ಅರವತ್ತೇಳು", - "68": "ಅರವತ್ತೆಂಟು", - "69": "ಅರವತ್ತೊಂಬತ್ತು", - "70": "ಎಪ್ಪತ್ತು", - "71": "ಎಪ್ಪತ್ತೊಂದು", - "72": "ಎಪ್ಪತ್ತೆರಡು", - "73": "ಎಪ್ಪತ್ತ್ ಮೂರು", - "74": "ಎಪ್ಪತ್ತ್ ನಾಲ್ಕು", - "75": "ಎಪ್ಪತ್ತೈದು", - "76": "ಎಪ್ಪತ್ತಾರು", - "77": "ಎಪ್ಪತ್ತೇಳು", - "78": "ಎಪ್ಪತ್ತೆಂಟು", - "79": "ಎಪ್ಪತ್ತೊಂಬತ್ತು", - "80": "ಎಂಬತ್ತು", - "81": "ಎಂಬತ್ತೊಂದು", - "82": "ಎಂಬತ್ತೆರಡು", - "83": "ಎಂಬತ್ತ್ ಮೂರು", - "84": "ಎಂಬತ್ತ್ ನಾಲ್ಕು", - "85": "ಎಂಬತ್ತೈದು", - "86": "ಎಂಬತ್ತಾರು", - "87": "ಎಂಬತ್ತೇಳು", - "88": "ಎಂಬತ್ತೆಂಟು", - "89": "ಎಂಬತ್ತೊಂಬತ್ತು", - "90": "ತೊಂಬತ್ತು", - "91": "ತೊಂಬತ್ತೊಂದು", - "92": "ತೊಂಬತ್ತೆರಡು", - "93": "ತೊಂಬತ್ತ ಮೂರು", - "94": "ತೊಂಬತ್ತ ನಾಲ್ಕು", - "95": "ತೊಂಬತ್ತೈದು", - "96": "ತೊಂಬತ್ತಾರು", - "97": "ತೊಂಬತ್ತೇಳು", - "98": "ತೊಂಬತ್ತೆಂಟು", - "99": "ತೊಂಬತ್ತೊಂಬತ್ತು", - "100": "ನೂರ", - "1000": "ಸಾವಿರದ", - "100000": "ಲಕ್ಷದ", - "10000000": "ಕೋಟಿ", - "1000000000": "ಶತಕೋಟಿ", -} # Kannada -num_dict["or"] = { - "0": "ଶୁନ୍ୟ", - "1": "ଏକ", - "2": "ଦୁଇ", - "3": "ତିନି", - "4": "ଚାରି", - "5": "ପାଞ୍ଚ", - "6": "ଛଅ", - "7": "ସାତ", - "8": "ଆଠ", - "9": "ନଅ", - "10": "ନଅ", - "11": "ଏଗାର", - "12": "ବାର", - "13": "ତେର", - "14": "ଚଉଦ", - "15": "ପନ୍ଦର", - "16": "ଷୋହଳ", - "17": "ସତର", - "18": "ଅଠର", - "19": "ଊଣାଇଶ", - "20": "କୋଡିଏ", - "21": "ଏକୋଇଶି", - "22": "ବାଇଶି", - "23": "ତେଇଶି", - "24": "ଚବିଶି", - "25": "ପଚିଶି", - "26": "ଛବିଶି", - "27": "ସତାଇଶି", - "28": "ଅଠାଇଶି", - "29": "ଅଣତିରିଶି", - "30": "ତିରିଶି", - "31": "ଏକତିରିଶି", - "32": "ବତିଶି", - "33": "ତେତିଶି", - "34": "ଚଉତିରିଶି", - "35": "ପଞ୍ଚତିରିଶି", - "36": "ଛତିଶି", - "37": "ସଂଇତିରିଶି", - "38": "ଅଠତିରିଶି", - "39": "ଅଣଚାଳିଶି", - "40": "ଚାଳିଶି", - "41": "ଏକଚାଳିଶି", - "42": "ବୟାଳିଶି", - "43": "ତେୟାଳିଶି", - "44": "ଚଉରାଳିଶି", - "45": "ପଞ୍ଚଚାଳିଶି", - "46": "ଛୟାଳିଶି", - "47": "ସତଚାଳିଶି", - "48": "ଅଠଚାଳିଶି", - "49": "ଅଣଚାଶ", - "50": "ପଚାଶ", - "51": "ଏକାବନ", - "52": "ବାଉନ", - "53": "ତେପନ", - "54": "ଚଉବନ", - "55": "ପଞ୍ଚାବନ", - "56": "ଛପନ", - "57": "ସତାବନ", - "58": "ଅଠାବନ", - "59": "ଅଣଷଠି", - "60": "ଷାଠିଏ", - "61": "ଏକଷଠି", - "62": "ବାଷଠି", - "63": "ତେଷଠି", - "64": "ଚଉଷଠି", - "65": "ପଞ୍ଚଷଠି", - "66": 
"ଛଅଷଠି", - "67": "ସତଷଠି", - "68": "ଅଠଷଠି", - "69": "ଅଣସ୍ତରୀ", - "70": "ସତୂରୀ", - "71": "ଏକସ୍ତରୀ", - "72": "ବାସ୍ତରୀ", - "73": "ତେସ୍ତରୀ", - "74": "ଚଉସ୍ତରୀ", - "75": "ପଞ୍ଚସ୍ତରୀ", - "76": "ଛଅସ୍ତରୀ", - "77": "ସତସ୍ତରୀ", - "78": "ଅଠସ୍ତରୀ", - "79": "ଅଣାଅଶୀ", - "80": "ଅଶୀ", - "81": "ଏକାଅଶୀ", - "82": "ବୟାଅଶୀ", - "83": "ତେୟାଅଶୀ", - "84": "ଚଉରାଅଶୀ", - "85": "ପଞ୍ଚାଅଶୀ", - "86": "ଛୟାଅଶୀ", - "87": "ସତାଅଶୀ", - "88": "ଅଠାଅଶୀ", - "89": "ଅଣାନବେ", - "90": "ନବେ", - "91": "ଏକାନବେ", - "92": "ବୟାନବେ", - "93": "ତେୟାନବେ", - "94": "ଚଉରାନବେ", - "95": "ପଞ୍ଚାନବେ", - "96": "ଛୟାନବେ", - "97": "ସତାନବେ", - "98": "ଅଠାନବେ", - "99": "ଅନେଶତ", - "100": "ଶହେ", - "1000": "ହଜାର", - "100000": "ଲକ୍ଷ", - "10000000": "କୋଟି", - "1000000000": "କୋଟି", -} # Oriya -num_dict["pa"] = { - "0": "ਸਿਫਰ ", - "1": "ਇੱਕ", - "2": "ਦੋ", - "3": "ਤਿੰਨ", - "4": "ਚਾਰ", - "5": "ਪੰਜ", - "6": "ਛੇ", - "7": "ਸੱਤ", - "8": "ਅੱਠ", - "9": "ਨੌਂ", - "10": "ਦੱਸ", - "11": "ਗਿਆਰਾਂ", - "12": "ਬਾਰਾਂ", - "13": "ਤੇਰਾਂ", - "14": "ਚੌਦਾਂ", - "15": "ਪੰਦਰਾਂ", - "16": "ਸੋਲ਼ਾਂ", - "17": "ਸਤਾਰਾਂ", - "18": "ਅਠਾਰਾਂ", - "19": "ਉਨੀ", - "20": "ਵੀਹ", - "21": "ਇੱਕੀ", - "22": "ਬਾਈ", - "23": "ਤੇਈ", - "24": "ਚੌਵੀ", - "25": "ਪੰਝੀ", - "26": "ਛੱਬੀ", - "27": "ਸਤਾਈ", - "28": "ਅਠਾਈ", - "29": "ਉਨੱਤੀ", - "30": "ਤੀਹ", - "31": "ਇਕੱਤੀ", - "32": "ਬੱਤੀ", - "33": "ਤੇਤੀ", - "34": "ਚੌਂਤੀ", - "35": "ਪੈਂਤੀ", - "36": "ਛੱਤੀ", - "37": "ਸੈਂਤੀ", - "38": "ਅਠੱਤੀ", - "39": "ਉਨਤਾਲੀ", - "40": "ਚਾਲੀ", - "41": "ਇਕਤਾਲੀ", - "42": "ਬਤਾਲੀ", - "43": "ਤਰਤਾਲੀ", - "44": "ਚੌਤਾਲੀ", - "45": "ਪੰਜਤਾਲੀ", - "46": "ਛਿਆਲੀ", - "47": "ਸੰਤਾਲੀ", - "48": "ਅੱਠਤਾਲੀ", - "49": "ਉਣਿੰਜਾ", - "50": "ਪੰਜਾਹ", - "51": "ਇਕਵਿੰਜਾ", - "52": "ਬਵਿੰਜਾ", - "53": "ਤਰਵਿੰਜਾ", - "54": "ਚਰਿੰਜਾ", - "55": "ਪਚਵਿੰਜਾ", - "56": "ਛਪਿੰਜਾ", - "57": "ਸਤਵਿੰਜਾ", - "58": "ਅੱਠਵਿੰਜਾ", - "59": "ਉਣਾਠ", - "60": "ਸੱਠ", - "61": "ਇਕਾਠ", - "62": "ਬਾਠ੍ਹ", - "63": "ਤਰੇਠ੍ਹ", - "64": "ਚੌਠ੍ਹ", - "65": "ਪੈਂਠ", - "66": "ਛਿਆਠ", - "67": "ਸਤਾਹਠ", - "68": "ਅੱਠਾਠ", - "69": "ਉਣੱਤਰ", - "70": "ਸੱਤਰ", - "71": "ਇਕ੍ਹੱਤਰ", - "72": "ਬਹੱਤਰ", - "73": "ਤਹੱਤਰ", - "74": "ਚੌਹੱਤਰ", - "75": "ਪੰਜੱਤਰ", - "76": "ਛਿਹੱਤਰ", - "77": "ਸਤੱਤਰ", - "78": "ਅਠੱਤਰ", - "79": "ਉਣਾਸੀ", - "80": "ਅੱਸੀ", - "81": "ਇਕਾਸੀ", - "82": "ਬਿਆਸੀ", - "83": "ਤਰਾਸੀ", - "84": "ਚਰਾਸੀ", - "85": "ਪੰਜਾਸੀ", - "86": "ਛਿਆਸੀ", - "87": "ਸਤਾਸੀ", - "88": "ਅਠਾਸੀ", - "89": "ਉਣਾਨਵੇਂ", - "90": "ਨੱਬੇ", - "91": "ਇਕਾਨਵੇਂ", - "92": "ਬਿਆਨਵੇਂ", - "93": "ਤਰਾਨਵੇਂ", - "94": "ਚਰਾਨਵੇਂ", - "95": "ਪਚਾਨਵੇਂ", - "96": "ਛਿਆਨਵੇਂ", - "97": "ਸਤਾਨਵੇਂ", - "98": "ਅਠਾਨਵੇਂ", - "99": "ਨਿੜਾਨਵੇਂ", - "100": "ਸੌ", - "1000": "ਹਜਾਰ", - "100000": "ਲੱਖ", - "10000000": "ਕਰੋੜ", - "1000000000": "ਅਰਬ", -} # Punjabi - -# --------------------------- num_to_word.py ------------------------------ -""" -Method to convert Numbers to Words -for indian languages - -Use cases:- -1) Speech recognition pre-processing -2) Language modeling Data pre-processing - -------------------------- -check indic_numbers.py to add support -for any indian language -""" - - -def language_specific_exception(words, lang, combiner): - """ - Language Specific Exception will come here - """ - - def occurs_at_end(piece): - return words[-len(piece) :] == piece - - if lang == "mr": - words = words.replace("एक" + combiner + "शे", "शंभर") - elif lang == "gu": - words = words.replace("બે" + combiner + "સો", "બસ્સો") - elif lang == "te": - exception_dict = { - "1": "ఒక", - "100": "వంద", - "100+": "వందలు", - "1000": "వెయ్యి", - "1000+": "వేలు", - "100000": "లక్ష", - "100000+": "లక్షలు", - "10000000": "కోటి", - "10000000+": "కోట్లు", - } - - test_case = ["100", "1000", "100000", "10000000"] - for test in test_case: - test_word = 
num_dict["te"][test] - match = num_dict["te"]["1"] + combiner + test_word - # for numbers like : 100, 1000, 100000 - if words == match: - return exception_dict[test] - # for numbers like : 200, 4000, 800000 - elif occurs_at_end(test_word): - words = words.replace(test_word, exception_dict[test + "+"]) - # for numbers like : 105, 1076, 123993 - elif not occurs_at_end(match): - replacement = exception_dict["1"] + combiner + exception_dict[test] - words = words.replace(match, replacement) - - # Exception case for 101...199 - special_case = "ఒక" + combiner + "వంద" - words = words.replace(special_case, "నూట") - elif lang == "kn": - # special case for 100 - if words == ("ಒಂದು" + combiner + "ನೂರ"): - return "ನೂರು" - exception_dict = { - "ನೂರ": "ನೂರು", - "ಸಾವಿರದ": "ಸಾವಿರ", - "ಲಕ್ಷದ": "ಲಕ್ಷ", - "ಕೋಟಿಯ": "ಕೋಟಿ", - } - for expt in exception_dict: - if occurs_at_end(expt): - words = words.replace(expt, exception_dict[expt]) - return words - - -def num_to_word(num, lang, separator=", ", combiner=" "): - """ - Main Method - :param num: Number digits from any indian language - :param lang: Language Code from supported Language - :param separator: Separator character i.e. separator = '-' --> 'two hundred-sixty' - :param combiner: combine number with position i.e. combiner = '-' --> 'two-hundred sixty' - :return: UTF-8 String of numbers in words - """ - lang = lang.lower() - num = str(num) - - # Load dictionary according to language code - assert lang in supported_lang, "Language not supported" - num_dic = num_dict[lang] - - # dash default combiner for english-india - if (lang == "en") & (combiner == " "): - combiner = "-" - - # Remove punctuations from numbers - num = str(num).replace(",", "").replace(" ", "") - - # Replace native language numbers with english digits - for language in supported_lang: - for num_index in range(10): - num = num.replace(all_num[language][num_index], all_num["en"][num_index]) - - # Assert that input contains only integer number - for digit in num: - assert digit in all_num["en"], "Give proper input" - - # Process - # For Number longer than 9 digits - def all_two_digit(digits_2): - if len(digits_2) <= 1: # Provided only one/zero digit - return num_dic.get(digits_2, "") - elif digits_2 == "00": # Two Zero provided - return num_dic["0"] + separator + num_dic["0"] - elif digits_2[0] == "0": # First digit is zero - return num_dic["0"] + separator + num_dic[digits_2[1]] - else: # Both digit provided - return num_dic[digits_2] - - # For Number less than 9 digits - def two_digit(digits_2): - digits_2 = digits_2.lstrip("0") - if len(digits_2) != 0: - return num_dic[digits_2] - else: - return "" - - def all_digit(digits): - digits = digits.lstrip("0") - digit_len = len(digits) - if digit_len > 3: - num_of_digits_to_process = (digit_len % 2) + 1 - process_digits = digits[:num_of_digits_to_process] - base = str(10 ** (int(digit_len / 2) * 2 - 1)) - remain_digits = digits[num_of_digits_to_process:] - return ( - num_dic[process_digits] - + combiner - + num_dic[base] - + separator - + all_digit(remain_digits) - ) - elif len(digits) == 3: - return ( - num_dic[digits[:1]] - + combiner - + num_dic["100"] - + separator - + two_digit(digits[1:]) - ) - else: - return two_digit(digits) - - num = num.lstrip("0") - full_digit_len = len(num) - - if full_digit_len == 0: - output = num_dic["0"] - elif full_digit_len <= 9: - output = all_digit(num) - else: - iteration = round(full_digit_len / 2) - output = all_two_digit(num[:2]) # First to digit - for i in range(1, iteration): - output = ( - output + 
separator + all_two_digit(num[i * 2 : (i + 1) * 2]) - ) # Next two digit pairs - remaining_digits = num[iteration * 2 :] - if not all_two_digit(remaining_digits) == "": - output = ( - output + separator + all_two_digit(remaining_digits) - ) # remaining Last one/two digits - - output = output.strip(separator) - - output = language_specific_exception(output, lang, combiner) - - return output - - -# --------------------------------- num_to_word_on_a_sent --------------------------------- - - -def is_digit(word, digit_pattern): - return re.search(digit_pattern, word) - - -def remove_punct(sent): - clean = re.sub("[%s]" % re.escape(string.punctuation), " ", sent) - return " ".join([word for word in clean.split() if word]) - - -def normalize_nums(text, lang): - """ - text: str (eg) - lang: lang code ['en', 'hi'] - - returns: str - (eg) - """ - - if lang in supported_lang: - words = text.split() - lang_digits = [str(i) for i in range(0, 10)] - - digit_pattern = "[" + "".join(lang_digits) + "]" - num_indices = [ - ind for ind, word in enumerate(words) if is_digit(word, digit_pattern) - ] - - words_up = [ - num_to_word(word, lang, separator=" ", combiner=" ") - if ind in num_indices - else word - for ind, word in enumerate(words) - ] - return " ".join(words_up) - else: - return text - - -if __name__ == "__main__": - print(normalize_nums("रीटा के पास 16 बिल्लियाँ हैं।", "hi")) diff --git a/spaces/Harveenchadha/en_to_indic_translation/scripts/preprocess_translate.py b/spaces/Harveenchadha/en_to_indic_translation/scripts/preprocess_translate.py deleted file mode 100644 index 8fbe3c275f7cb655d95125256260190d51b35ca7..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/en_to_indic_translation/scripts/preprocess_translate.py +++ /dev/null @@ -1,172 +0,0 @@ -INDIC_NLP_LIB_HOME = "indic_nlp_library" -INDIC_NLP_RESOURCES = "indic_nlp_resources" -import sys - -sys.path.append(r"{}".format(INDIC_NLP_LIB_HOME)) -from indicnlp import common - -common.set_resources_path(INDIC_NLP_RESOURCES) -from indicnlp import loader - -loader.load() -from sacremoses import MosesPunctNormalizer -from sacremoses import MosesTokenizer -from sacremoses import MosesDetokenizer -from collections import defaultdict - -from tqdm import tqdm -from joblib import Parallel, delayed - -from indicnlp.tokenize import indic_tokenize -from indicnlp.tokenize import indic_detokenize -from indicnlp.normalize import indic_normalize -from indicnlp.transliterate import unicode_transliterate - - -en_tok = MosesTokenizer(lang="en") -en_normalizer = MosesPunctNormalizer() - - -def preprocess_line(line, normalizer, lang, transliterate=False): - if lang == "en": - return " ".join( - en_tok.tokenize(en_normalizer.normalize(line.strip()), escape=False) - ) - elif transliterate: - # line = indic_detokenize.trivial_detokenize(line.strip(), lang) - return unicode_transliterate.UnicodeIndicTransliterator.transliterate( - " ".join( - indic_tokenize.trivial_tokenize( - normalizer.normalize(line.strip()), lang - ) - ), - lang, - "hi", - ).replace(" ् ", "्") - else: - # we only need to transliterate for joint training - return " ".join( - indic_tokenize.trivial_tokenize(normalizer.normalize(line.strip()), lang) - ) - - -def preprocess(infname, outfname, lang, transliterate=False): - """ - Normalize, tokenize and script convert(for Indic) - return number of sentences input file - - """ - - n = 0 - num_lines = sum(1 for line in open(infname, "r")) - if lang == "en": - with open(infname, "r", encoding="utf-8") as infile, open( - outfname, "w", 
encoding="utf-8" - ) as outfile: - - out_lines = Parallel(n_jobs=-1, backend="multiprocessing")( - delayed(preprocess_line)(line, None, lang) - for line in tqdm(infile, total=num_lines) - ) - - for line in out_lines: - outfile.write(line + "\n") - n += 1 - - else: - normfactory = indic_normalize.IndicNormalizerFactory() - normalizer = normfactory.get_normalizer(lang) - # reading - with open(infname, "r", encoding="utf-8") as infile, open( - outfname, "w", encoding="utf-8" - ) as outfile: - - out_lines = Parallel(n_jobs=-1, backend="multiprocessing")( - delayed(preprocess_line)(line, normalizer, lang, transliterate) - for line in tqdm(infile, total=num_lines) - ) - - for line in out_lines: - outfile.write(line + "\n") - n += 1 - return n - - -def old_preprocess(infname, outfname, lang): - """ - Preparing each corpus file: - - Normalization - - Tokenization - - Script coversion to Devanagari for Indic scripts - """ - n = 0 - num_lines = sum(1 for line in open(infname, "r")) - # reading - with open(infname, "r", encoding="utf-8") as infile, open( - outfname, "w", encoding="utf-8" - ) as outfile: - - if lang == "en": - en_tok = MosesTokenizer(lang="en") - en_normalizer = MosesPunctNormalizer() - for line in tqdm(infile, total=num_lines): - outline = " ".join( - en_tok.tokenize(en_normalizer.normalize(line.strip()), escape=False) - ) - outfile.write(outline + "\n") - n += 1 - - else: - normfactory = indic_normalize.IndicNormalizerFactory() - normalizer = normfactory.get_normalizer(lang) - for line in tqdm(infile, total=num_lines): - outline = ( - unicode_transliterate.UnicodeIndicTransliterator.transliterate( - " ".join( - indic_tokenize.trivial_tokenize( - normalizer.normalize(line.strip()), lang - ) - ), - lang, - "hi", - ).replace(" ् ", "्") - ) - - outfile.write(outline + "\n") - n += 1 - return n - - -if __name__ == "__main__": - - # INDIC_NLP_LIB_HOME = "indic_nlp_library" - # INDIC_NLP_RESOURCES = "indic_nlp_resources" - # sys.path.append(r'{}'.format(INDIC_NLP_LIB_HOME)) - # common.set_resources_path(INDIC_NLP_RESOURCES) - - # data_dir = '../joint_training/v1' - # new_dir = data_dir + '.norm' - # for path, subdirs, files in os.walk(data_dir): - # for name in files: - # infile = os.path.join(path, name) - # lang = infile.split('.')[-1] - # outfile = os.path.join(path.replace(data_dir, new_dir), name) - # preprocess(infile, outfile, lang) - # loader.load() - - infname = sys.argv[1] - outfname = sys.argv[2] - lang = sys.argv[3] - - if len(sys.argv) == 4: - transliterate = False - elif len(sys.argv) == 5: - transliterate = sys.argv[4] - if transliterate.lower() == "true": - transliterate = True - else: - transliterate = False - else: - print(f"Invalid arguments: {sys.argv}") - exit() - print(preprocess(infname, outfname, lang, transliterate)) diff --git a/spaces/Harveenchadha/oiTrans/subword-nmt/subword_nmt/learn_bpe.py b/spaces/Harveenchadha/oiTrans/subword-nmt/subword_nmt/learn_bpe.py deleted file mode 100644 index 7b01f046fa6b3fd8ba64b7658c23b6f80a4e6ba3..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/oiTrans/subword-nmt/subword_nmt/learn_bpe.py +++ /dev/null @@ -1,367 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Author: Rico Sennrich - -"""Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text. -Unlike the original BPE, it does not compress the plain text, but can be used to reduce the vocabulary -of a text to a configurable number of symbols, with only a small increase in the number of tokens. 
- -Reference: -Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units. -Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany. -""" - -from __future__ import unicode_literals - -import os -import sys -import inspect -import codecs -import re -import copy -import argparse -import warnings -import tempfile -from multiprocessing import Pool, cpu_count -from collections import defaultdict, Counter - -try: - from tqdm import tqdm -except ImportError: - def tqdm(iterator, *args, **kwargs): - return iterator - -# hack for python2/3 compatibility -from io import open -argparse.open = open - -def create_parser(subparsers=None): - - if subparsers: - parser = subparsers.add_parser('learn-bpe', - formatter_class=argparse.RawDescriptionHelpFormatter, - description="learn BPE-based word segmentation") - else: - parser = argparse.ArgumentParser( - formatter_class=argparse.RawDescriptionHelpFormatter, - description="learn BPE-based word segmentation") - - parser.add_argument( - '--input', '-i', type=argparse.FileType('r'), default=sys.stdin, - metavar='PATH', - help="Input text (default: standard input).") - - parser.add_argument( - '--output', '-o', type=argparse.FileType('w'), default=sys.stdout, - metavar='PATH', - help="Output file for BPE codes (default: standard output)") - parser.add_argument( - '--symbols', '-s', type=int, default=10000, - help="Create this many new symbols (each representing a character n-gram) (default: %(default)s)") - parser.add_argument( - '--min-frequency', type=int, default=2, metavar='FREQ', - help='Stop if no symbol pair has frequency >= FREQ (default: %(default)s)') - parser.add_argument('--dict-input', action="store_true", - help="If set, input file is interpreted as a dictionary where each line contains a word-count pair") - parser.add_argument( - '--total-symbols', '-t', action="store_true", - help="subtract number of characters from the symbols to be generated (so that '--symbols' becomes an estimate for the total number of symbols needed to encode text).") - parser.add_argument( - '--num-workers', type=int, default=1, - help="Number of processors to process texts, only supported in Python3. If -1, set `multiprocessing.cpu_count()`. (default: %(default)s)") - parser.add_argument( - '--verbose', '-v', action="store_true", - help="verbose mode.") - - return parser - -def get_vocabulary(fobj, is_dict=False, num_workers=1): - """Read text and return dictionary that encodes vocabulary - """ - vocab = Counter() - if is_dict: - for i, line in enumerate(fobj): - try: - word, count = line.strip('\r\n ').split(' ') - except: - print('Failed reading vocabulary file at line {0}: {1}'.format(i, line)) - sys.exit(1) - vocab[word] += int(count) - elif num_workers == 1 or fobj.name == '': - if num_workers > 1: - warnings.warn("In parallel mode, the input cannot be STDIN. 
Using 1 processor instead.") - for i, line in enumerate(fobj): - for word in line.strip('\r\n ').split(' '): - if word: - vocab[word] += 1 - elif num_workers > 1: - - if sys.version_info < (3, 0): - print("Parallel mode is only supported in Python3.") - sys.exit(1) - - with open(fobj.name, encoding="utf8") as f: - size = os.fstat(f.fileno()).st_size - chunk_size = int(size / num_workers) - offsets = [0 for _ in range(num_workers + 1)] - for i in range(1, num_workers): - f.seek(chunk_size * i) - pos = f.tell() - while True: - try: - line = f.readline() - break - except UnicodeDecodeError: - pos -= 1 - f.seek(pos) - offsets[i] = f.tell() - assert 0 <= offsets[i] < 1e20, "Bad new line separator, e.g. '\\r'" - - vocab_files = [] - pool = Pool(processes=num_workers) - for i in range(num_workers): - tmp = tempfile.NamedTemporaryFile(delete=False) - tmp.close() - vocab_files.append(tmp) - pool.apply_async(_get_vocabulary, (fobj.name, tmp.name, offsets[i], offsets[i + 1])) - pool.close() - pool.join() - import pickle - for i in range(num_workers): - with open(vocab_files[i].name, 'rb') as f: - vocab += pickle.load(f) - os.remove(vocab_files[i].name) - else: - raise ValueError('`num_workers` is expected to be a positive number, but got {}.'.format(num_workers)) - return vocab - -def _get_vocabulary(infile, outfile, begin, end): - import pickle - vocab = Counter() - with open(infile, encoding="utf8") as f: - f.seek(begin) - line = f.readline() - while line: - pos = f.tell() - assert 0 <= pos < 1e20, "Bad new line separator, e.g. '\\r'" - if end > 0 and pos > end: - break - for word in line.strip('\r\n ').split(' '): - if word: - vocab[word] += 1 - line = f.readline() - with open(outfile, 'wb') as f: - pickle.dump(vocab, f) - -def update_pair_statistics(pair, changed, stats, indices): - """Minimally update the indices and frequency of symbol pairs - - if we merge a pair of symbols, only pairs that overlap with occurrences - of this pair are affected, and need to be updated. - """ - stats[pair] = 0 - indices[pair] = defaultdict(int) - first, second = pair - new_pair = first+second - for j, word, old_word, freq in changed: - - # find all instances of pair, and update frequency/indices around it - i = 0 - while True: - # find first symbol - try: - i = old_word.index(first, i) - except ValueError: - break - # if first symbol is followed by second symbol, we've found an occurrence of pair (old_word[i:i+2]) - if i < len(old_word)-1 and old_word[i+1] == second: - # assuming a symbol sequence "A B C", if "B C" is merged, reduce the frequency of "A B" - if i: - prev = old_word[i-1:i+1] - stats[prev] -= freq - indices[prev][j] -= 1 - if i < len(old_word)-2: - # assuming a symbol sequence "A B C B", if "B C" is merged, reduce the frequency of "C B". 
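To make the bookkeeping above concrete, the effect of one merge on a single word looks like this (an illustration of the local deltas the function applies, not a substitute for it):

```python
# Merging the pair ("B", "C") inside a word with corpus frequency f:
old_word = ("A", "B", "C", "B")
new_word = ("A", "BC", "B")
# pairs lost:   ("A", "B"), ("B", "C"), ("C", "B")  -> each count decreases by f
# pairs gained: ("A", "BC"), ("BC", "B")            -> each count increases by f
# Only pairs overlapping an occurrence of ("B", "C") change, which is why the
# function patches stats/indices locally instead of recounting the vocabulary.
```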
- # however, skip this if the sequence is A B C B C, because the frequency of "C B" will be reduced by the previous code block - if old_word[i+2] != first or i >= len(old_word)-3 or old_word[i+3] != second: - nex = old_word[i+1:i+3] - stats[nex] -= freq - indices[nex][j] -= 1 - i += 2 - else: - i += 1 - - i = 0 - while True: - try: - # find new pair - i = word.index(new_pair, i) - except ValueError: - break - # assuming a symbol sequence "A BC D", if "B C" is merged, increase the frequency of "A BC" - if i: - prev = word[i-1:i+1] - stats[prev] += freq - indices[prev][j] += 1 - # assuming a symbol sequence "A BC B", if "B C" is merged, increase the frequency of "BC B" - # however, if the sequence is A BC BC, skip this step because the count of "BC BC" will be incremented by the previous code block - if i < len(word)-1 and word[i+1] != new_pair: - nex = word[i:i+2] - stats[nex] += freq - indices[nex][j] += 1 - i += 1 - - -def get_pair_statistics(vocab): - """Count frequency of all symbol pairs, and create index""" - - # data structure of pair frequencies - stats = defaultdict(int) - - #index from pairs to words - indices = defaultdict(lambda: defaultdict(int)) - - for i, (word, freq) in enumerate(vocab): - prev_char = word[0] - for char in word[1:]: - stats[prev_char, char] += freq - indices[prev_char, char][i] += 1 - prev_char = char - - return stats, indices - - -def replace_pair(pair, vocab, indices): - """Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB'""" - first, second = pair - pair_str = ''.join(pair) - pair_str = pair_str.replace('\\','\\\\') - changes = [] - pattern = re.compile(r'(?'); - # version numbering allows bckward compatibility - outfile.write('#version: 0.2\n') - - vocab = get_vocabulary(infile, is_dict, num_workers) - vocab = dict([(tuple(x[:-1])+(x[-1]+'',) ,y) for (x,y) in vocab.items()]) - sorted_vocab = sorted(vocab.items(), key=lambda x: x[1], reverse=True) - - stats, indices = get_pair_statistics(sorted_vocab) - big_stats = copy.deepcopy(stats) - - if total_symbols: - uniq_char_internal = set() - uniq_char_final = set() - for word in vocab: - for char in word[:-1]: - uniq_char_internal.add(char) - uniq_char_final.add(word[-1]) - sys.stderr.write('Number of word-internal characters: {0}\n'.format(len(uniq_char_internal))) - sys.stderr.write('Number of word-final characters: {0}\n'.format(len(uniq_char_final))) - sys.stderr.write('Reducing number of merge operations by {0}\n'.format(len(uniq_char_internal) + len(uniq_char_final))) - num_symbols -= len(uniq_char_internal) + len(uniq_char_final) - - # threshold is inspired by Zipfian assumption, but should only affect speed - threshold = max(stats.values()) / 10 - for i in tqdm(range(num_symbols)): - if stats: - most_frequent = max(stats, key=lambda x: (stats[x], x)) - - # we probably missed the best pair because of pruning; go back to full statistics - if not stats or (i and stats[most_frequent] < threshold): - prune_stats(stats, big_stats, threshold) - stats = copy.deepcopy(big_stats) - most_frequent = max(stats, key=lambda x: (stats[x], x)) - # threshold is inspired by Zipfian assumption, but should only affect speed - threshold = stats[most_frequent] * i/(i+10000.0) - prune_stats(stats, big_stats, threshold) - - if stats[most_frequent] < min_frequency: - sys.stderr.write('no pair has frequency >= {0}. 
Stopping\n'.format(min_frequency)) - break - - if verbose: - sys.stderr.write('pair {0}: {1} {2} -> {1}{2} (frequency {3})\n'.format(i, most_frequent[0], most_frequent[1], stats[most_frequent])) - outfile.write('{0} {1}\n'.format(*most_frequent)) - changes = replace_pair(most_frequent, sorted_vocab, indices) - update_pair_statistics(most_frequent, changes, stats, indices) - stats[most_frequent] = 0 - if not i % 100: - prune_stats(stats, big_stats, threshold) - - -if __name__ == '__main__': - - currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) - newdir = os.path.join(currentdir, 'subword_nmt') - if os.path.isdir(newdir): - warnings.simplefilter('default') - warnings.warn( - "this script's location has moved to {0}. This symbolic link will be removed in a future version. Please point to the new location, or install the package and use the command 'subword-nmt'".format(newdir), - DeprecationWarning - ) - - # python 2/3 compatibility - if sys.version_info < (3, 0): - sys.stderr = codecs.getwriter('UTF-8')(sys.stderr) - sys.stdout = codecs.getwriter('UTF-8')(sys.stdout) - sys.stdin = codecs.getreader('UTF-8')(sys.stdin) - else: - sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer) - sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer) - sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer) - - parser = create_parser() - args = parser.parse_args() - - if args.num_workers <= 0: - args.num_workers = cpu_count() - - if sys.version_info < (3, 0) and args.num_workers > 1: - args.num_workers = 1 - warnings.warn("Parallel mode is only supported in Python3. Using 1 processor instead.") - - # read/write files as UTF-8 - if args.input.name != '': - args.input = codecs.open(args.input.name, encoding='utf-8') - if args.output.name != '': - args.output = codecs.open(args.output.name, 'w', encoding='utf-8') - - learn_bpe(args.input, args.output, args.symbols, args.min_frequency, args.verbose, is_dict=args.dict_input, total_symbols=args.total_symbols, num_workers=args.num_workers) diff --git a/spaces/Hoodady/3DFuse/ldm/modules/diffusionmodules/upscaling.py b/spaces/Hoodady/3DFuse/ldm/modules/diffusionmodules/upscaling.py deleted file mode 100644 index 03816662098ce1ffac79bd939b892e867ab91988..0000000000000000000000000000000000000000 --- a/spaces/Hoodady/3DFuse/ldm/modules/diffusionmodules/upscaling.py +++ /dev/null @@ -1,81 +0,0 @@ -import torch -import torch.nn as nn -import numpy as np -from functools import partial - -from ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule -from ldm.util import default - - -class AbstractLowScaleModel(nn.Module): - # for concatenating a downsampled image to the latent representation - def __init__(self, noise_schedule_config=None): - super(AbstractLowScaleModel, self).__init__() - if noise_schedule_config is not None: - self.register_schedule(**noise_schedule_config) - - def register_schedule(self, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, - cosine_s=cosine_s) - alphas = 1. 
- betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - def forward(self, x): - return x, None - - def decode(self, x): - return x - - -class SimpleImageConcat(AbstractLowScaleModel): - # no noise level conditioning - def __init__(self): - super(SimpleImageConcat, self).__init__(noise_schedule_config=None) - self.max_noise_level = 0 - - def forward(self, x): - # fix to constant noise level - return x, torch.zeros(x.shape[0], device=x.device).long() - - -class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel): - def __init__(self, noise_schedule_config, max_noise_level=1000, to_cuda=False): - super().__init__(noise_schedule_config=noise_schedule_config) - self.max_noise_level = max_noise_level - - def forward(self, x, noise_level=None): - if noise_level is None: - noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long() - else: - assert isinstance(noise_level, torch.Tensor) - z = self.q_sample(x, noise_level) - return z, noise_level - - - diff --git a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/text/korean.py b/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/text/korean.py deleted file mode 100644 index edee07429a450c55e3d8e246997faaa1e0b89cc9..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/text/korean.py +++ /dev/null @@ -1,210 +0,0 @@ -import re -from jamo import h2j, j2hcj -import ko_pron - - -# This is a list of Korean classifiers preceded by pure Korean numerals. 
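That classifier list drives the choice between sino-Korean and native numerals; a small usage sketch, assuming the helpers defined further down are in scope (the example phrase is illustrative):

```python
# '마리', '권', '번' and the other entries take native Korean numerals, so
# number_to_hangul() checks the word that follows a digit against this list.
print(number_to_hangul("고양이 2마리"))   # -> "고양이 두마리" (native '두', not sino '이')

# korean_to_ipa() runs latin_to_hangul() and number_to_hangul() first, then
# romanises with ko_pron and applies the lazy-IPA substitutions below
# (requires the ko_pron package).
print(korean_to_ipa("고양이 2마리"))
```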
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (ipa, lazy ipa) pairs: -_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('t͡ɕ','ʧ'), - ('d͡ʑ','ʥ'), - ('ɲ','n^'), - ('ɕ','ʃ'), - ('ʷ','w'), - ('ɭ','l`'), - ('ʎ','ɾ'), - ('ɣ','ŋ'), - ('ɰ','ɯ'), - ('ʝ','j'), - ('ʌ','ə'), - ('ɡ','g'), - ('\u031a','#'), - ('\u0348','='), - ('\u031e',''), - ('\u0320',''), - ('\u0339','') -]] - - -def latin_to_hangul(text): - for regex, replacement in _latin_to_hangul: - text = re.sub(regex, replacement, text) - return text - - -def divide_hangul(text): - text = j2hcj(h2j(text)) - for regex, replacement in _hangul_divided: - text = re.sub(regex, replacement, text) - return text - - -def hangul_number(num, sino=True): - '''Reference https://github.com/Kyubyong/g2pK''' - num = re.sub(',', '', num) - - if num == '0': - return '영' - if not sino and num == '20': - return '스무' - - digits = '123456789' - names = '일이삼사오육칠팔구' - digit2name = {d: n for d, n in zip(digits, names)} - - modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉' - decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔' - digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())} - digit2dec = {d: dec for d, dec in zip(digits, decimals.split())} - - spelledout = [] - for i, digit in enumerate(num): - i = len(num) - i - 1 - if sino: - if i == 0: - name = digit2name.get(digit, '') - elif i == 1: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - else: - if i == 0: - name = digit2mod.get(digit, '') - elif i == 1: - name = digit2dec.get(digit, '') - if digit == '0': - if i % 4 == 0: - last_three = spelledout[-min(3, len(spelledout)):] - if ''.join(last_three) == '': - spelledout.append('') - continue - else: - spelledout.append('') - continue - if i == 2: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 3: - name = digit2name.get(digit, '') + '천' - name = name.replace('일천', '천') - elif i == 4: - name = digit2name.get(digit, '') + '만' - name = name.replace('일만', '만') - elif i == 5: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - elif i == 6: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 7: - name = digit2name.get(digit, '') + '천' - name = name.replace('일천', '천') - elif i == 8: - name = digit2name.get(digit, '') + '억' - elif i == 9: - name = digit2name.get(digit, '') + '십' - elif i == 10: - name = digit2name.get(digit, '') + '백' - elif i == 11: - name = digit2name.get(digit, '') + '천' - elif i == 12: - name 
= digit2name.get(digit, '') + '조' - elif i == 13: - name = digit2name.get(digit, '') + '십' - elif i == 14: - name = digit2name.get(digit, '') + '백' - elif i == 15: - name = digit2name.get(digit, '') + '천' - spelledout.append(name) - return ''.join(elem for elem in spelledout) - - -def number_to_hangul(text): - '''Reference https://github.com/Kyubyong/g2pK''' - tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text)) - for token in tokens: - num, classifier = token - if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers: - spelledout = hangul_number(num, sino=False) - else: - spelledout = hangul_number(num, sino=True) - text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}') - # digit by digit for remaining digits - digits = '0123456789' - names = '영일이삼사오육칠팔구' - for d, n in zip(digits, names): - text = text.replace(d, n) - return text - - -def korean_to_lazy_ipa(text): - text = latin_to_hangul(text) - text = number_to_hangul(text) - text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text) - for regex, replacement in _ipa_to_lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def korean_to_ipa(text): - text = korean_to_lazy_ipa(text) - return text.replace('ʧ','tʃ').replace('ʥ','dʑ') diff --git a/spaces/Ikaros521/so-vits-svc-4.0-ikaros/modules/ddsp.py b/spaces/Ikaros521/so-vits-svc-4.0-ikaros/modules/ddsp.py deleted file mode 100644 index b09ac5c5c19d165e75e1780877a857be8c104ed7..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/so-vits-svc-4.0-ikaros/modules/ddsp.py +++ /dev/null @@ -1,190 +0,0 @@ -import torch -import torch.nn as nn -from torch.nn import functional as F -import torch.fft as fft -import numpy as np -import librosa as li -import math -from scipy.signal import get_window - - -def safe_log(x): - return torch.log(x + 1e-7) - - -@torch.no_grad() -def mean_std_loudness(dataset): - mean = 0 - std = 0 - n = 0 - for _, _, l in dataset: - n += 1 - mean += (l.mean().item() - mean) / n - std += (l.std().item() - std) / n - return mean, std - - -def multiscale_fft(signal, scales, overlap): - stfts = [] - for s in scales: - S = torch.stft( - signal, - s, - int(s * (1 - overlap)), - s, - torch.hann_window(s).to(signal), - True, - normalized=True, - return_complex=True, - ).abs() - stfts.append(S) - return stfts - - -def resample(x, factor: int): - batch, frame, channel = x.shape - x = x.permute(0, 2, 1).reshape(batch * channel, 1, frame) - - window = torch.hann_window( - factor * 2, - dtype=x.dtype, - device=x.device, - ).reshape(1, 1, -1) - y = torch.zeros(x.shape[0], x.shape[1], factor * x.shape[2]).to(x) - y[..., ::factor] = x - y[..., -1:] = x[..., -1:] - y = torch.nn.functional.pad(y, [factor, factor]) - y = torch.nn.functional.conv1d(y, window)[..., :-1] - - y = y.reshape(batch, channel, factor * frame).permute(0, 2, 1) - - return y - - -def upsample(signal, factor): - signal = signal.permute(0, 2, 1) - signal = nn.functional.interpolate(signal, size=signal.shape[-1] * factor) - return signal.permute(0, 2, 1) - - -def remove_above_nyquist(amplitudes, pitch, sampling_rate): - n_harm = amplitudes.shape[-1] - pitches = pitch * torch.arange(1, n_harm + 1).to(pitch) - aa = (pitches < sampling_rate / 2).float() + 1e-4 - return amplitudes * aa - - -def scale_function(x): - return 2 * torch.sigmoid(x) ** (math.log(10)) + 1e-7 - - -def extract_loudness(signal, sampling_rate, block_size, n_fft=2048): - S = li.stft( - signal, - n_fft=n_fft, - hop_length=block_size, - 
win_length=n_fft, - center=True, - ) - S = np.log(abs(S) + 1e-7) - f = li.fft_frequencies(sampling_rate, n_fft) - a_weight = li.A_weighting(f) - - S = S + a_weight.reshape(-1, 1) - - S = np.mean(S, 0)[..., :-1] - - return S - - -def extract_pitch(signal, sampling_rate, block_size): - length = signal.shape[-1] // block_size - f0 = crepe.predict( - signal, - sampling_rate, - step_size=int(1000 * block_size / sampling_rate), - verbose=1, - center=True, - viterbi=True, - ) - f0 = f0[1].reshape(-1)[:-1] - - if f0.shape[-1] != length: - f0 = np.interp( - np.linspace(0, 1, length, endpoint=False), - np.linspace(0, 1, f0.shape[-1], endpoint=False), - f0, - ) - - return f0 - - -def mlp(in_size, hidden_size, n_layers): - channels = [in_size] + (n_layers) * [hidden_size] - net = [] - for i in range(n_layers): - net.append(nn.Linear(channels[i], channels[i + 1])) - net.append(nn.LayerNorm(channels[i + 1])) - net.append(nn.LeakyReLU()) - return nn.Sequential(*net) - - -def gru(n_input, hidden_size): - return nn.GRU(n_input * hidden_size, hidden_size, batch_first=True) - - -def harmonic_synth(pitch, amplitudes, sampling_rate): - n_harmonic = amplitudes.shape[-1] - omega = torch.cumsum(2 * math.pi * pitch / sampling_rate, 1) - omegas = omega * torch.arange(1, n_harmonic + 1).to(omega) - signal = (torch.sin(omegas) * amplitudes).sum(-1, keepdim=True) - return signal - - -def amp_to_impulse_response(amp, target_size): - amp = torch.stack([amp, torch.zeros_like(amp)], -1) - amp = torch.view_as_complex(amp) - amp = fft.irfft(amp) - - filter_size = amp.shape[-1] - - amp = torch.roll(amp, filter_size // 2, -1) - win = torch.hann_window(filter_size, dtype=amp.dtype, device=amp.device) - - amp = amp * win - - amp = nn.functional.pad(amp, (0, int(target_size) - int(filter_size))) - amp = torch.roll(amp, -filter_size // 2, -1) - - return amp - - -def fft_convolve(signal, kernel): - signal = nn.functional.pad(signal, (0, signal.shape[-1])) - kernel = nn.functional.pad(kernel, (kernel.shape[-1], 0)) - - output = fft.irfft(fft.rfft(signal) * fft.rfft(kernel)) - output = output[..., output.shape[-1] // 2:] - - return output - - -def init_kernels(win_len, win_inc, fft_len, win_type=None, invers=False): - if win_type == 'None' or win_type is None: - window = np.ones(win_len) - else: - window = get_window(win_type, win_len, fftbins=True) # **0.5 - - N = fft_len - fourier_basis = np.fft.rfft(np.eye(N))[:win_len] - real_kernel = np.real(fourier_basis) - imag_kernel = np.imag(fourier_basis) - kernel = np.concatenate([real_kernel, imag_kernel], 1).T - - if invers: - kernel = np.linalg.pinv(kernel).T - - kernel = kernel * window - kernel = kernel[:, None, :] - return torch.from_numpy(kernel.astype(np.float32)), torch.from_numpy(window[None, :, None].astype(np.float32)) - diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py b/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py deleted file mode 100644 index d77e71653078dfb206f267f889334d1ed7b7da8b..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +++ /dev/null @@ -1,461 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import Callable, List, Optional, Union - -import torch - -import PIL -from diffusers.utils import is_accelerate_available -from packaging import version -from transformers import CLIPFeatureExtractor, CLIPVisionModelWithProjection - -from ...configuration_utils import FrozenDict -from ...models import AutoencoderKL, UNet2DConditionModel -from ...pipeline_utils import DiffusionPipeline -from ...schedulers import ( - DDIMScheduler, - DPMSolverMultistepScheduler, - EulerAncestralDiscreteScheduler, - EulerDiscreteScheduler, - LMSDiscreteScheduler, - PNDMScheduler, -) -from ...utils import deprecate, logging -from . import StableDiffusionPipelineOutput -from .safety_checker import StableDiffusionSafetyChecker - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -class StableDiffusionImageVariationPipeline(DiffusionPipeline): - r""" - Pipeline to generate variations from an input image using Stable Diffusion. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - image_encoder ([`CLIPVisionModelWithProjection`]): - Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection), - specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPFeatureExtractor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae: AutoencoderKL, - image_encoder: CLIPVisionModelWithProjection, - unet: UNet2DConditionModel, - scheduler: Union[ - DDIMScheduler, - PNDMScheduler, - LMSDiscreteScheduler, - EulerDiscreteScheduler, - EulerAncestralDiscreteScheduler, - DPMSolverMultistepScheduler, - ], - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPFeatureExtractor, - requires_safety_checker: bool = True, - ): - super().__init__() - - if safety_checker is None and requires_safety_checker: - logger.warn( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( - version.parse(unet.config._diffusers_version).base_version - ) < version.parse("0.9.0.dev0") - is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 - if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: - deprecation_message = ( - "The configuration file of the unet has set the default `sample_size` to smaller than" - " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" - " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" - " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" - " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" - " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" - " in the config might lead to incorrect results in future versions. If you have downloaded this" - " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" - " the `unet/config.json` file" - ) - deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(unet.config) - new_config["sample_size"] = 64 - unet._internal_dict = FrozenDict(new_config) - - self.register_modules( - vae=vae, - image_encoder=image_encoder, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_attention_slicing - def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): - r""" - Enable sliced attention computation. - - When this option is enabled, the attention module will split the input tensor in slices, to compute attention - in several steps. This is useful to save some memory in exchange for a small speed decrease. - - Args: - slice_size (`str` or `int`, *optional*, defaults to `"auto"`): - When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If - a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, - `attention_head_dim` must be a multiple of `slice_size`. 
- """ - if slice_size == "auto": - if isinstance(self.unet.config.attention_head_dim, int): - # half the attention head size is usually a good trade-off between - # speed and memory - slice_size = self.unet.config.attention_head_dim // 2 - else: - # if `attention_head_dim` is a list, take the smallest head size - slice_size = min(self.unet.config.attention_head_dim) - - self.unet.set_attention_slice(slice_size) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_attention_slicing - def disable_attention_slicing(self): - r""" - Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go - back to computing attention in one step. - """ - # set slice_size = `None` to disable `attention slicing` - self.enable_attention_slicing(None) - - def enable_sequential_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, - text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a - `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. - """ - if is_accelerate_available(): - from accelerate import cpu_offload - else: - raise ImportError("Please install accelerate via `pip install accelerate`") - - device = torch.device(f"cuda:{gpu_id}") - - for cpu_offloaded_model in [self.unet, self.image_encoder, self.vae, self.safety_checker]: - if cpu_offloaded_model is not None: - cpu_offload(cpu_offloaded_model, device) - - @property - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device - def _execution_device(self): - r""" - Returns the device on which the pipeline's models will be executed. After calling - `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module - hooks. - """ - if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): - return self.device - for module in self.unet.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - return self.device - - def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): - dtype = next(self.image_encoder.parameters()).dtype - - if not isinstance(image, torch.Tensor): - image = self.feature_extractor(images=image, return_tensors="pt").pixel_values - - image = image.to(device=device, dtype=dtype) - image_embeddings = self.image_encoder(image).image_embeds - image_embeddings = image_embeddings.unsqueeze(1) - - # duplicate image embeddings for each generation per prompt, using mps friendly method - bs_embed, seq_len, _ = image_embeddings.shape - image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) - image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) - - if do_classifier_free_guidance: - uncond_embeddings = torch.zeros_like(image_embeddings) - - # For classifier free guidance, we need to do two forward passes. 
- # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - image_embeddings = torch.cat([uncond_embeddings, image_embeddings]) - - return image_embeddings - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker - def run_safety_checker(self, image, device, dtype): - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(dtype) - ) - else: - has_nsfw_concept = None - return image, has_nsfw_concept - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents - def decode_latents(self, latents): - latents = 1 / 0.18215 * latents - image = self.vae.decode(latents).sample - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def check_inputs(self, image, height, width, callback_steps): - if ( - not isinstance(image, torch.Tensor) - and not isinstance(image, PIL.Image.Image) - and not isinstance(image, list) - ): - raise ValueError( - f"`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `list` but is {type(image)}" - ) - - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." 
- ) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents - def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): - shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) - if latents is None: - if device.type == "mps": - # randn does not work reproducibly on mps - latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device) - else: - latents = torch.randn(shape, generator=generator, device=device, dtype=dtype) - else: - if latents.shape != shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - @torch.no_grad() - def __call__( - self, - image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor], - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[torch.Generator] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: Optional[int] = 1, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): - The image or images to guide the image generation. If you provide a tensor, it needs to comply with the - configuration of - [this](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json) - `CLIPFeatureExtractor` - height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - # 0. Default height and width to unet - height = height or self.unet.config.sample_size * self.vae_scale_factor - width = width or self.unet.config.sample_size * self.vae_scale_factor - - # 1. Check inputs. Raise error if not correct - self.check_inputs(image, height, width, callback_steps) - - # 2. Define call parameters - if isinstance(image, PIL.Image.Image): - batch_size = 1 - elif isinstance(image, list): - batch_size = len(image) - else: - batch_size = image.shape[0] - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input image - image_embeddings = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance) - - # 4. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - # 5. Prepare latent variables - num_channels_latents = self.unet.in_channels - latents = self.prepare_latents( - batch_size * num_images_per_prompt, - num_channels_latents, - height, - width, - image_embeddings.dtype, - device, - generator, - latents, - ) - - # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 7. 
Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # 8. Post-processing - image = self.decode_latents(latents) - - # 9. Run safety checker - image, has_nsfw_concept = self.run_safety_checker(image, device, image_embeddings.dtype) - - # 10. Convert to PIL - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/utils/model_card_template.md b/spaces/Jackflack09/diffuse-custom/diffusers/utils/model_card_template.md deleted file mode 100644 index f19c85b0fcf2f7b07e9c3f950a9657b3f2053f21..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/utils/model_card_template.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{{ card_data }} ---- - - - -# {{ model_name | default("Diffusion Model") }} - -## Model description - -This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library -on the `{{ dataset_name }}` dataset. 
- -## Intended uses & limitations - -#### How to use - -```python -# TODO: add an example code snippet for running this diffusion pipeline -``` - -#### Limitations and bias - -[TODO: provide examples of latent issues and potential remediations] - -## Training data - -[TODO: describe the data used to train the model] - -### Training hyperparameters - -The following hyperparameters were used during training: -- learning_rate: {{ learning_rate }} -- train_batch_size: {{ train_batch_size }} -- eval_batch_size: {{ eval_batch_size }} -- gradient_accumulation_steps: {{ gradient_accumulation_steps }} -- optimizer: AdamW with betas=({{ adam_beta1 }}, {{ adam_beta2 }}), weight_decay={{ adam_weight_decay }} and epsilon={{ adam_epsilon }} -- lr_scheduler: {{ lr_scheduler }} -- lr_warmup_steps: {{ lr_warmup_steps }} -- ema_inv_gamma: {{ ema_inv_gamma }} -- ema_inv_gamma: {{ ema_power }} -- ema_inv_gamma: {{ ema_max_decay }} -- mixed_precision: {{ mixed_precision }} - -### Training results - -📈 [TensorBoard logs](https://huggingface.co/{{ repo_name }}/tensorboard?#scalars) - - diff --git a/spaces/Jikiwi/sovits-models/cluster/__init__.py b/spaces/Jikiwi/sovits-models/cluster/__init__.py deleted file mode 100644 index f1b9bde04e73e9218a5d534227caa4c25332f424..0000000000000000000000000000000000000000 --- a/spaces/Jikiwi/sovits-models/cluster/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -import numpy as np -import torch -from sklearn.cluster import KMeans - -def get_cluster_model(ckpt_path): - checkpoint = torch.load(ckpt_path) - kmeans_dict = {} - for spk, ckpt in checkpoint.items(): - km = KMeans(ckpt["n_features_in_"]) - km.__dict__["n_features_in_"] = ckpt["n_features_in_"] - km.__dict__["_n_threads"] = ckpt["_n_threads"] - km.__dict__["cluster_centers_"] = ckpt["cluster_centers_"] - kmeans_dict[spk] = km - return kmeans_dict - -def get_cluster_result(model, x, speaker): - """ - x: np.array [t, 256] - return cluster class result - """ - return model[speaker].predict(x) - -def get_cluster_center_result(model, x,speaker): - """x: np.array [t, 256]""" - predict = model[speaker].predict(x) - return model[speaker].cluster_centers_[predict] - -def get_center(model, x,speaker): - return model[speaker].cluster_centers_[x] diff --git a/spaces/JoeyFoursheds/ClonerHug/infer_pack/modules.py b/spaces/JoeyFoursheds/ClonerHug/infer_pack/modules.py deleted file mode 100644 index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000 --- a/spaces/JoeyFoursheds/ClonerHug/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - 
p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = 
hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not 
None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = 
self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/chatgpt - windows.bat b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/chatgpt - windows.bat deleted file mode 100644 index 0b78fdc3a559abd692e3a9e9af5e482124d13a99..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/chatgpt - windows.bat +++ /dev/null @@ -1,14 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" - -REM The web page can be accessed with delayed start http://127.0.0.1:7860/ -ping -n 5 127.0.0.1>nul - -REM access chargpt via your default browser -start "" "http://127.0.0.1:7860/" - - -echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). \ No newline at end of file diff --git a/spaces/Jorgerv97/Herramienta_interactiva_ensenyanza_tecnicas_aprendizaje_supervisado_salud/GeneralInfo/correlation.py b/spaces/Jorgerv97/Herramienta_interactiva_ensenyanza_tecnicas_aprendizaje_supervisado_salud/GeneralInfo/correlation.py deleted file mode 100644 index de4fbf510844412830748bfab0a019caa236e3ec..0000000000000000000000000000000000000000 --- a/spaces/Jorgerv97/Herramienta_interactiva_ensenyanza_tecnicas_aprendizaje_supervisado_salud/GeneralInfo/correlation.py +++ /dev/null @@ -1,45 +0,0 @@ -from shiny import module, ui, reactive - -@module.ui -def correlation_1_ui(): - return ui.div( - {"id": "correlation-info"}, - ui.tags.h3("CORRELACIÓN DE DATOS"), - ui.tags.p("La eliminación de características correlacionadas entre sí es otro paso esencial pero que suele ser olvidado. Es importante tener en cuenta que el criterio para eliminar características correlacionadas debe basarse en análisis adecuados y en la comprensión del dominio del problema, para evitar la eliminación de características relevantes para la predicción." - , style="padding-right:50px; padding-top:20px; text-align: justify; text-justify: inter-word;"), - ui.input_action_button("show_corr_benefits", "Beneficios de eliminar variables correlacionadas ▽" - , style="padding: 0px 0px 10px 0px; background: white; border: none; font-weight: bold; text-decoration: underline; border: 0 !important; box-shadow: 0 0 !important; transition: 0.1s !important; background-color: transparent !important;"), - ) - -@module.ui -def correlation_2_ui(): - return ui.div( - ui.markdown("Ahora puedes ver la correlación entre todos los datos y eliminar automáticamente las características con una correlación superior a la que marques. Los coeficientes de correlación cuyo **valor absoluto está por encima de 0.7 indican variables que pueden considerarse altamente correlacionadas**. 
Mi recomendación es eliminarlas automáticamente, pero también puedes eliminar características de forma manual con el selector y el botón del apartado anterior.") - , style="padding-right:50px; padding-top:20px; text-align: justify; text-justify: inter-word;" - ) - - -@module.server -def correlation_server(input, output, session): - @reactive.Effect - @reactive.event(input.show_corr_benefits) - def _(): - show_corr_benef_button = input.show_corr_benefits() - if show_corr_benef_button % 2 == 1: - ui.update_action_button("show_corr_benefits", label="Beneficios de eliminar variables correlacionadas △") - ui.insert_ui( - ui.div({"id": "inserted-corr-benef-info"}, - ui.markdown("""La eliminación de variables correlacionadas aporta los siguientes beneficios: -- **Reducción de la redundancia**: Las características correlacionadas proporcionan información redundante o duplicada. Esto aumenta la complejidad del modelo y también puede conducir a un sobreajuste (overfitting), donde el modelo se ajusta demasiado a los datos de entrenamiento y no generaliza bien a nuevos datos. -- **Mejora de la interpretación del modelo**: Un conjunto de características más independientes y distintas mejorará la interpretabilidad del modelo, ya que puede ser difícil discernir la contribución individual de cada una. -- **Eficiencia computacional**: Al eliminar características correlacionadas, se reduce la dimensionalidad del conjunto de datos. Esto mejora la eficiencia computacional durante el entrenamiento y la inferencia del modelo, ya que se reducen los cálculos necesarios y el consumo de recursos. -- **Evitar la multicolinealidad**: La multicolinealidad se refiere a la alta correlación entre dos o más características. Cuando existe multicolinealidad, la matriz de diseño utilizada en algunos algoritmos de aprendizaje puede volverse inestable, lo que afecta la calidad de los resultados.""" - ), - style="border: solid 0px grey; border-radius: 10px; background:#eceef1 ;margin-right:50px; padding:15px 20px 10px 20px; text-align: justify; text-justify: inter-word;", - ), - selector="#correlation-info", - where="beforeEnd", - ) - else: - ui.update_action_button("show_corr_benefits", label="Beneficios de eliminar variables correlacionadas ▽") - ui.remove_ui("#inserted-corr-benef-info") \ No newline at end of file diff --git a/spaces/KyanChen/FunSR/datasets/image_folder.py b/spaces/KyanChen/FunSR/datasets/image_folder.py deleted file mode 100644 index 0e7d87dd772fa5de03fc70ee775f9123bb7ee1da..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/datasets/image_folder.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import json -from PIL import Image - -import pickle -import imageio -import numpy as np -import torch -from torch.utils.data import Dataset -from torchvision import transforms - -from datasets import register - - -@register('image-folder') -class ImageFolder(Dataset): - - def __init__(self, root_path, split_file=None, split_key=None, first_k=None, - repeat=1, cache='none'): - self.repeat = repeat - self.cache = cache - - if split_file is None: - filenames = sorted(os.listdir(root_path)) - else: - with open(split_file, 'r') as f: - filenames = json.load(f)[split_key] - if first_k is not None: - filenames = filenames[:first_k] - - self.files = [] - for filename in filenames: - file = os.path.join(root_path, filename) - - if cache == 'none': - self.files.append(file) - - elif cache == 'bin': - bin_root = os.path.join(os.path.dirname(root_path), - '_bin_' + os.path.basename(root_path)) - if not 
os.path.exists(bin_root): - os.mkdir(bin_root) - print('mkdir', bin_root) - bin_file = os.path.join( - bin_root, filename.split('.')[0] + '.pkl') - if not os.path.exists(bin_file): - with open(bin_file, 'wb') as f: - pickle.dump(imageio.imread(file), f) - print('dump', bin_file) - self.files.append(bin_file) - - elif cache == 'in_memory': - self.files.append(transforms.ToTensor()( - Image.open(file).convert('RGB'))) - - def __len__(self): - return len(self.files) * self.repeat - - def __getitem__(self, idx): - x = self.files[idx % len(self.files)] - - if self.cache == 'none': - return transforms.ToTensor()(Image.open(x).convert('RGB')) - - elif self.cache == 'bin': - with open(x, 'rb') as f: - x = pickle.load(f) - x = np.ascontiguousarray(x.transpose(2, 0, 1)) - x = torch.from_numpy(x).float() / 255 - return x - - elif self.cache == 'in_memory': - return x - - -@register('paired-image-folders') -class PairedImageFolders(Dataset): - - def __init__(self, root_path_1, root_path_2, **kwargs): - self.dataset_1 = ImageFolder(root_path_1, **kwargs) - self.dataset_2 = ImageFolder(root_path_2, **kwargs) - - def __len__(self): - return len(self.dataset_1) - - def __getitem__(self, idx): - return self.dataset_1[idx], self.dataset_2[idx] diff --git a/spaces/Lamai/LAMAIGPT/autogpt/memory/milvus.py b/spaces/Lamai/LAMAIGPT/autogpt/memory/milvus.py deleted file mode 100644 index 44aa72b956224fa4c2a16d5f40b0eaeb35e98581..0000000000000000000000000000000000000000 --- a/spaces/Lamai/LAMAIGPT/autogpt/memory/milvus.py +++ /dev/null @@ -1,115 +0,0 @@ -""" Milvus memory storage provider.""" -from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections - -from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding - - -class MilvusMemory(MemoryProviderSingleton): - """Milvus memory storage provider.""" - - def __init__(self, cfg) -> None: - """Construct a milvus memory storage connection. - - Args: - cfg (Config): Auto-GPT global config. - """ - # connect to milvus server. - connections.connect(address=cfg.milvus_addr) - fields = [ - FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True), - FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536), - FieldSchema(name="raw_text", dtype=DataType.VARCHAR, max_length=65535), - ] - - # create collection if not exist and load it. - self.milvus_collection = cfg.milvus_collection - self.schema = CollectionSchema(fields, "auto-gpt memory storage") - self.collection = Collection(self.milvus_collection, self.schema) - # create index if not exist. - if not self.collection.has_index(): - self.collection.release() - self.collection.create_index( - "embeddings", - { - "metric_type": "IP", - "index_type": "HNSW", - "params": {"M": 8, "efConstruction": 64}, - }, - index_name="embeddings", - ) - self.collection.load() - - def add(self, data) -> str: - """Add an embedding of data into memory. - - Args: - data (str): The raw text to construct embedding index. - - Returns: - str: log. - """ - embedding = get_ada_embedding(data) - result = self.collection.insert([[embedding], [data]]) - _text = ( - "Inserting data into memory at primary key: " - f"{result.primary_keys[0]}:\n data: {data}" - ) - return _text - - def get(self, data): - """Return the most relevant data in memory. - Args: - data: The data to compare to. - """ - return self.get_relevant(data, 1) - - def clear(self) -> str: - """Drop the index in memory. - - Returns: - str: log. 
- """ - self.collection.drop() - self.collection = Collection(self.milvus_collection, self.schema) - self.collection.create_index( - "embeddings", - { - "metric_type": "IP", - "index_type": "HNSW", - "params": {"M": 8, "efConstruction": 64}, - }, - index_name="embeddings", - ) - self.collection.load() - return "Obliviated" - - def get_relevant(self, data: str, num_relevant: int = 5): - """Return the top-k relevant data in memory. - Args: - data: The data to compare to. - num_relevant (int, optional): The max number of relevant data. - Defaults to 5. - - Returns: - list: The top-k relevant data. - """ - # search the embedding and return the most relevant text. - embedding = get_ada_embedding(data) - search_params = { - "metrics_type": "IP", - "params": {"nprobe": 8}, - } - result = self.collection.search( - [embedding], - "embeddings", - search_params, - num_relevant, - output_fields=["raw_text"], - ) - return [item.entity.value_of_field("raw_text") for item in result[0]] - - def get_stats(self) -> str: - """ - Returns: The stats of the milvus cache. - """ - return f"Entities num: {self.collection.num_entities}" diff --git a/spaces/Lbin123/Lbingo/src/lib/bots/bing/types.ts b/spaces/Lbin123/Lbingo/src/lib/bots/bing/types.ts deleted file mode 100644 index 02cd5e8b01e3529642d28dc1539bf958f4ac420b..0000000000000000000000000000000000000000 --- a/spaces/Lbin123/Lbingo/src/lib/bots/bing/types.ts +++ /dev/null @@ -1,259 +0,0 @@ -export type Author = 'user' | 'system' | 'bot' - -export type BotId = 'bing' - -export enum BingConversationStyle { - Creative = 'Creative', - Balanced = 'Balanced', - Precise = 'Precise' -} - -export enum ErrorCode { - CONVERSATION_LIMIT = 'CONVERSATION_LIMIT', - BING_UNAUTHORIZED = 'BING_UNAUTHORIZED', - BING_FORBIDDEN = 'BING_FORBIDDEN', - BING_CAPTCHA = 'BING_CAPTCHA', - THROTTLE_LIMIT = 'THROTTLE_LIMIT', - NOTFOUND_ERROR = 'NOT_FOUND_ERROR', - UNKOWN_ERROR = 'UNKOWN_ERROR', - NETWORK_ERROR = 'NETWORK_ERROR', -} - -export class ChatError extends Error { - code: ErrorCode - constructor(message: string, code: ErrorCode) { - super(message) - this.code = code - } -} - -export type ChatMessageModel = { - id: string - author: Author - text: string - error?: ChatError - throttling?: Throttling - sourceAttributions?: SourceAttribution[] - suggestedResponses?: SuggestedResponse[] -} - -export interface ConversationModel { - messages: ChatMessageModel[] -} - -export type Event = - | { - type: 'UPDATE_ANSWER' - data: { - text: string - spokenText?: string - sourceAttributions?: SourceAttribution[] - suggestedResponses?: SuggestedResponse[] - throttling?: Throttling - } - } - | { - type: 'DONE' - } - | { - type: 'ERROR' - error: ChatError - } - -export interface SendMessageParams { - prompt: string - imageUrl?: string - options: T - onEvent: (event: Event) => void - signal?: AbortSignal -} - -export interface ConversationResponse { - conversationId: string - clientId: string - conversationSignature: string - result: { - value: string - message?: string - } -} - -export interface Telemetry { - metrics?: null - startTime: string -} - -export interface ChatUpdateArgument { - messages?: ChatResponseMessage[] - throttling?: Throttling - requestId: string - result: null -} - -export type ChatUpdateCompleteResponse = { - type: 2 - invocationId: string - item: ChatResponseItem -} | { - type: 1 - target: string - arguments: ChatUpdateArgument[] -} | { - type: 3 - invocationId: string -} | { - type: 6 | 7 -} - -export interface ChatRequestResult { - value: string - serviceVersion: string - 
error?: string -} - -export interface ChatResponseItem { - messages: ChatResponseMessage[] - firstNewMessageIndex: number - suggestedResponses: null - conversationId: string - requestId: string - conversationExpiryTime: string - telemetry: Telemetry - result: ChatRequestResult - throttling: Throttling -} -export enum InvocationEventType { - Invocation = 1, - StreamItem = 2, - Completion = 3, - StreamInvocation = 4, - CancelInvocation = 5, - Ping = 6, - Close = 7, -} - -// https://github.com/bytemate/bingchat-api/blob/main/src/lib.ts - -export interface ConversationInfo { - conversationId: string - clientId: string - conversationSignature: string - invocationId: number - conversationStyle: BingConversationStyle - prompt: string - imageUrl?: string -} - -export interface BingChatResponse { - conversationSignature: string - conversationId: string - clientId: string - invocationId: number - conversationExpiryTime: Date - response: string - details: ChatResponseMessage -} - -export interface Throttling { - maxNumLongDocSummaryUserMessagesInConversation: number - maxNumUserMessagesInConversation: number - numLongDocSummaryUserMessagesInConversation: number - numUserMessagesInConversation: number -} - -export interface ChatResponseMessage { - text: string - spokenText?: string - author: string - createdAt: Date - timestamp: Date - messageId: string - requestId: string - offense: string - adaptiveCards: AdaptiveCard[] - sourceAttributions: SourceAttribution[] - feedback: Feedback - contentOrigin: string - messageType?: string - contentType?: string - privacy: null - suggestedResponses: SuggestedResponse[] -} - -export interface AdaptiveCard { - type: string - version: string - body: Body[] -} - -export interface Body { - type: string - text: string - wrap: boolean - size?: string -} - -export interface Feedback { - tag: null - updatedOn: null - type: string -} - -export interface SourceAttribution { - providerDisplayName: string - seeMoreUrl: string - searchQuery: string -} - -export interface SuggestedResponse { - text: string - author?: Author - createdAt?: Date - timestamp?: Date - messageId?: string - messageType?: string - offense?: string - feedback?: Feedback - contentOrigin?: string - privacy?: null -} - -export interface KBlobRequest { - knowledgeRequest: KnowledgeRequestContext - imageBase64?: string -} - -export interface KBlobResponse { - blobId: string - processedBlobId?: string -} - -export interface KnowledgeRequestContext { - imageInfo: ImageInfo; - knowledgeRequest: KnowledgeRequest; -} - -export interface ImageInfo { - url?: string; -} - -export interface KnowledgeRequest { - invokedSkills: string[]; - subscriptionId: string; - invokedSkillsRequestData: InvokedSkillsRequestData; - convoData: ConvoData; -} - -export interface ConvoData { - convoid: string; - convotone: BingConversationStyle; -} - -export interface InvokedSkillsRequestData { - enableFaceBlur: boolean; -} - -export interface FileItem { - url: string; - status?: 'loading' | 'error' | 'loaded' -} diff --git a/spaces/Lianjd/stock_dashboard/backtrader/indicators/ichimoku.py b/spaces/Lianjd/stock_dashboard/backtrader/indicators/ichimoku.py deleted file mode 100644 index 26fe2f7401c706bc289d54d98f957029b8138c95..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/indicators/ichimoku.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 
2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import backtrader as bt -from . import Highest, Lowest - - -class Ichimoku(bt.Indicator): - ''' - Developed and published in his book in 1969 by journalist Goichi Hosoda - - Formula: - - tenkan_sen = (Highest(High, tenkan) + Lowest(Low, tenkan)) / 2.0 - - kijun_sen = (Highest(High, kijun) + Lowest(Low, kijun)) / 2.0 - - The next 2 are pushed 26 bars into the future - - - senkou_span_a = (tenkan_sen + kijun_sen) / 2.0 - - senkou_span_b = ((Highest(High, senkou) + Lowest(Low, senkou)) / 2.0 - - This is pushed 26 bars into the past - - - chikou = close - - The cloud (Kumo) is formed by the area between the senkou_spans - - See: - - http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud - - ''' - lines = ('tenkan_sen', 'kijun_sen', - 'senkou_span_a', 'senkou_span_b', 'chikou_span',) - params = ( - ('tenkan', 9), - ('kijun', 26), - ('senkou', 52), - ('senkou_lead', 26), # forward push - ('chikou', 26), # backwards push - ) - - plotinfo = dict(subplot=False) - plotlines = dict( - senkou_span_a=dict(_fill_gt=('senkou_span_b', 'g'), - _fill_lt=('senkou_span_b', 'r')), - ) - - def __init__(self): - hi_tenkan = Highest(self.data.high, period=self.p.tenkan) - lo_tenkan = Lowest(self.data.low, period=self.p.tenkan) - self.l.tenkan_sen = (hi_tenkan + lo_tenkan) / 2.0 - - hi_kijun = Highest(self.data.high, period=self.p.kijun) - lo_kijun = Lowest(self.data.low, period=self.p.kijun) - self.l.kijun_sen = (hi_kijun + lo_kijun) / 2.0 - - senkou_span_a = (self.l.tenkan_sen + self.l.kijun_sen) / 2.0 - self.l.senkou_span_a = senkou_span_a(-self.p.senkou_lead) - - hi_senkou = Highest(self.data.high, period=self.p.senkou) - lo_senkou = Lowest(self.data.low, period=self.p.senkou) - senkou_span_b = (hi_senkou + lo_senkou) / 2.0 - self.l.senkou_span_b = senkou_span_b(-self.p.senkou_lead) - - self.l.chikou_span = self.data.close(self.p.chikou) - - super(Ichimoku, self).__init__() diff --git a/spaces/Lianjd/stock_dashboard/backtrader/plot/finance.py b/spaces/Lianjd/stock_dashboard/backtrader/plot/finance.py deleted file mode 100644 index 2c01d59a0518e84818efb5161b05edc37e6f5679..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/plot/finance.py +++ /dev/null @@ -1,594 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -from ..utils.py3 import range, zip - -import matplotlib.collections as mcol -import matplotlib.colors as mcolors -import matplotlib.legend as mlegend -import matplotlib.lines as mlines - -from .utils import shade_color - - -class CandlestickPlotHandler(object): - legend_opens = [0.50, 0.50, 0.50] - legend_highs = [1.00, 1.00, 1.00] - legend_lows = [0.00, 0.00, 0.00] - legend_closes = [0.80, 0.00, 1.00] - - def __init__(self, - ax, x, opens, highs, lows, closes, - colorup='k', colordown='r', - edgeup=None, edgedown=None, - tickup=None, tickdown=None, - width=1, tickwidth=1, - edgeadjust=0.05, edgeshading=-10, - alpha=1.0, - label='_nolegend', - fillup=True, - filldown=True, - **kwargs): - - # Manager up/down bar colors - r, g, b = mcolors.colorConverter.to_rgb(colorup) - self.colorup = r, g, b, alpha - r, g, b = mcolors.colorConverter.to_rgb(colordown) - self.colordown = r, g, b, alpha - # Manage the edge up/down colors for the bars - if edgeup: - r, g, b = mcolors.colorConverter.to_rgb(edgeup) - self.edgeup = ((r, g, b, alpha),) - else: - self.edgeup = shade_color(self.colorup, edgeshading) - - if edgedown: - r, g, b = mcolors.colorConverter.to_rgb(edgedown) - self.edgedown = ((r, g, b, alpha),) - else: - self.edgedown = shade_color(self.colordown, edgeshading) - - # Manage the up/down tick colors - if tickup: - r, g, b = mcolors.colorConverter.to_rgb(tickup) - self.tickup = ((r, g, b, alpha),) - else: - self.tickup = self.edgeup - - if tickdown: - r, g, b = mcolors.colorConverter.to_rgb(tickdown) - self.tickdown = ((r, g, b, alpha),) - else: - self.tickdown = self.edgedown - - self.barcol, self.tickcol = self.barcollection( - x, opens, highs, lows, closes, - width, tickwidth, edgeadjust, - label=label, - fillup=fillup, filldown=filldown, - **kwargs) - - # add collections to the axis and return them - ax.add_collection(self.tickcol) - ax.add_collection(self.barcol) - - # Update the axis - ax.update_datalim(((0, min(lows)), (len(opens), max(highs)))) - ax.autoscale_view() - - # Add self as legend handler for this object - mlegend.Legend.update_default_handler_map({self.barcol: self}) - - def legend_artist(self, legend, orig_handle, fontsize, handlebox): - x0 = handlebox.xdescent - y0 = handlebox.ydescent - width = handlebox.width / len(self.legend_opens) - height = handlebox.height - - # Generate the x axis coordinates (handlebox based) - xs = [x0 + width * (i + 0.5) for i in range(len(self.legend_opens))] - - barcol, tickcol = self.barcollection( - xs, - self.legend_opens, self.legend_highs, - self.legend_lows, self.legend_closes, - width=width, tickwidth=2, - scaling=height, bot=y0) - - barcol.set_transform(handlebox.get_transform()) - handlebox.add_artist(barcol) - tickcol.set_transform(handlebox.get_transform()) - handlebox.add_artist(tickcol) - - return barcol, tickcol - - def barcollection(self, - xs, - opens, highs, lows, closes, - width, tickwidth=1, edgeadjust=0, - label='_nolegend', - scaling=1.0, bot=0, - fillup=True, filldown=True, - **kwargs): - - # 
Prepack different zips of the series values - oc = lambda: zip(opens, closes) # NOQA: E731 - xoc = lambda: zip(xs, opens, closes) # NOQA: E731 - iohlc = lambda: zip(xs, opens, highs, lows, closes) # NOQA: E731 - - colorup = self.colorup if fillup else 'None' - colordown = self.colordown if filldown else 'None' - colord = {True: colorup, False: colordown} - colors = [colord[o < c] for o, c in oc()] - - edgecolord = {True: self.edgeup, False: self.edgedown} - edgecolors = [edgecolord[o < c] for o, c in oc()] - - tickcolord = {True: self.tickup, False: self.tickdown} - tickcolors = [tickcolord[o < c] for o, c in oc()] - - delta = width / 2 - edgeadjust - - def barbox(i, open, close): - # delta seen as closure - left, right = i - delta, i + delta - open = open * scaling + bot - close = close * scaling + bot - return (left, open), (left, close), (right, close), (right, open) - - barareas = [barbox(i, o, c) for i, o, c in xoc()] - - def tup(i, open, high, close): - high = high * scaling + bot - open = open * scaling + bot - close = close * scaling + bot - - return (i, high), (i, max(open, close)) - - tickrangesup = [tup(i, o, h, c) for i, o, h, l, c in iohlc()] - - def tdown(i, open, low, close): - low = low * scaling + bot - open = open * scaling + bot - close = close * scaling + bot - - return (i, low), (i, min(open, close)) - - tickrangesdown = [tdown(i, o, l, c) for i, o, h, l, c in iohlc()] - - # Extra variables for the collections - useaa = 0, # use tuple here - lw = 0.5, # and here - tlw = tickwidth, - - # Bar collection for the candles - barcol = mcol.PolyCollection( - barareas, - facecolors=colors, - edgecolors=edgecolors, - antialiaseds=useaa, - linewidths=lw, - label=label, - **kwargs) - - # LineCollections have a higher zorder than PolyCollections - # to ensure the edges of the bars are not overwriten by the Lines - # we need to put the bars slightly over the LineCollections - kwargs['zorder'] = barcol.get_zorder() * 0.9999 - - # Up/down ticks from the body - tickcol = mcol.LineCollection( - tickrangesup + tickrangesdown, - colors=tickcolors, - linewidths=tlw, - antialiaseds=useaa, - **kwargs) - - # return barcol, tickcol - return barcol, tickcol - - -def plot_candlestick(ax, - x, opens, highs, lows, closes, - colorup='k', colordown='r', - edgeup=None, edgedown=None, - tickup=None, tickdown=None, - width=1, tickwidth=1.25, - edgeadjust=0.05, edgeshading=-10, - alpha=1.0, - label='_nolegend', - fillup=True, - filldown=True, - **kwargs): - - chandler = CandlestickPlotHandler( - ax, x, opens, highs, lows, closes, - colorup, colordown, - edgeup, edgedown, - tickup, tickdown, - width, tickwidth, - edgeadjust, edgeshading, - alpha, - label, - fillup, - filldown, - **kwargs) - - # Return the collections. 
the barcol goes first because - # is the larger, has the dominant zorder and defines the legend - return chandler.barcol, chandler.tickcol - - -class VolumePlotHandler(object): - legend_vols = [0.5, 1.0, 0.75] - legend_opens = [0, 1, 0] - legend_closes = [1, 0, 1] - - def __init__(self, - ax, x, opens, closes, volumes, - colorup='k', colordown='r', - edgeup=None, edgedown=None, - edgeshading=-5, edgeadjust=0.05, - width=1, alpha=1.0, - **kwargs): - - # Manage the up/down colors - r, g, b = mcolors.colorConverter.to_rgb(colorup) - self.colorup = r, g, b, alpha - r, g, b = mcolors.colorConverter.to_rgb(colordown) - self.colordown = r, g, b, alpha - - # Prepare the edge colors - if not edgeup: - self.edgeup = shade_color(self.colorup, edgeshading) - else: - r, g, b = mcolors.colorConverter.to_rgb(edgeup) - self.edgeup = r, g, b, alpha - - if not edgedown: - self.edgedown = shade_color(self.colordown, edgeshading) - else: - r, g, b = mcolors.colorConverter.to_rgb(edgedown) - self.edgedown = r, g, b, alpha - - corners = (0, 0), (len(closes), max(volumes)) - ax.update_datalim(corners) - ax.autoscale_view() - - self.barcol = self.barcollection( - x, opens, closes, volumes, - width=width, edgeadjust=edgeadjust, - **kwargs) - - # add to axes - ax.add_collection(self.barcol) - - # Add a legend handler for this object - mlegend.Legend.update_default_handler_map({self.barcol: self}) - - def legend_artist(self, legend, orig_handle, fontsize, handlebox): - x0 = handlebox.xdescent - y0 = handlebox.ydescent - width = handlebox.width / len(self.legend_vols) - height = handlebox.height - - # Generate the x axis coordinates (handlebox based) - xs = [x0 + width * (i + 0.5) for i in range(len(self.legend_vols))] - - barcol = self.barcollection( - xs, self.legend_opens, self.legend_closes, self.legend_vols, - width=width, vscaling=height, vbot=y0) - - barcol.set_transform(handlebox.get_transform()) - handlebox.add_artist(barcol) - - return barcol - - def barcollection(self, - x, opens, closes, vols, - width, edgeadjust=0, - vscaling=1.0, vbot=0, - **kwargs): - - # Prepare the data - openclose = lambda: zip(opens, closes) # NOQA: E731 - - # Calculate bars colors - colord = {True: self.colorup, False: self.colordown} - colors = [colord[open < close] for open, close in openclose()] - edgecolord = {True: self.edgeup, False: self.edgedown} - edgecolors = [edgecolord[open < close] for open, close in openclose()] - - # bar width to the sides - delta = width / 2 - edgeadjust - - # small auxiliary func to return the bar coordinates - def volbar(i, v): - left, right = i - delta, i + delta - v = vbot + v * vscaling - return (left, vbot), (left, v), (right, v), (right, vbot) - - barareas = [volbar(i, v) for i, v in zip(x, vols)] - barcol = mcol.PolyCollection( - barareas, - facecolors=colors, - edgecolors=edgecolors, - antialiaseds=(0,), - linewidths=(0.5,), - **kwargs) - - return barcol - - -def plot_volume( - ax, x, opens, closes, volumes, - colorup='k', colordown='r', - edgeup=None, edgedown=None, - edgeshading=-5, edgeadjust=0.05, - width=1, alpha=1.0, - **kwargs): - - vhandler = VolumePlotHandler( - ax, x, opens, closes, volumes, - colorup, colordown, - edgeup, edgedown, - edgeshading, edgeadjust, - width, alpha, - **kwargs) - - return vhandler.barcol, - - -class OHLCPlotHandler(object): - legend_opens = [0.50, 0.50, 0.50] - legend_highs = [1.00, 1.00, 1.00] - legend_lows = [0.00, 0.00, 0.00] - legend_closes = [0.80, 0.20, 0.90] - - def __init__(self, - ax, x, opens, highs, lows, closes, - colorup='k', colordown='r', 
- width=1, tickwidth=0.5, - alpha=1.0, - label='_nolegend', - **kwargs): - - # Manager up/down bar colors - r, g, b = mcolors.colorConverter.to_rgb(colorup) - self.colorup = r, g, b, alpha - r, g, b = mcolors.colorConverter.to_rgb(colordown) - self.colordown = r, g, b, alpha - - bcol, ocol, ccol = self.barcollection( - x, opens, highs, lows, closes, - width=width, tickwidth=tickwidth, - label=label, - **kwargs) - - self.barcol = bcol - self.opencol = ocol - self.closecol = ccol - - # add collections to the axis and return them - ax.add_collection(self.barcol) - ax.add_collection(self.opencol) - ax.add_collection(self.closecol) - - # Update the axis - ax.update_datalim(((0, min(lows)), (len(opens), max(highs)))) - ax.autoscale_view() - - # Add self as legend handler for this object - mlegend.Legend.update_default_handler_map({self.barcol: self}) - - def legend_artist(self, legend, orig_handle, fontsize, handlebox): - x0 = handlebox.xdescent - y0 = handlebox.ydescent - width = handlebox.width / len(self.legend_opens) - height = handlebox.height - - # Generate the x axis coordinates (handlebox based) - xs = [x0 + width * (i + 0.5) for i in range(len(self.legend_opens))] - - barcol, opencol, closecol = self.barcollection( - xs, - self.legend_opens, self.legend_highs, - self.legend_lows, self.legend_closes, - width=1.5, tickwidth=2, - scaling=height, bot=y0) - - barcol.set_transform(handlebox.get_transform()) - handlebox.add_artist(barcol) - # opencol.set_transform(handlebox.get_transform()) - handlebox.add_artist(opencol) - # closecol.set_transform(handlebox.get_transform()) - handlebox.add_artist(closecol) - - return barcol, opencol, closecol - - def barcollection(self, - xs, - opens, highs, lows, closes, - width, tickwidth, - label='_nolegend', - scaling=1.0, bot=0, - **kwargs): - - # Prepack different zips of the series values - ihighlow = lambda: zip(xs, highs, lows) # NOQA: E731 - iopen = lambda: zip(xs, opens) # NOQA: E731 - iclose = lambda: zip(xs, closes) # NOQA: E731 - openclose = lambda: zip(opens, closes) # NOQA: E731 - - colord = {True: self.colorup, False: self.colordown} - colors = [colord[open < close] for open, close in openclose()] - - # Extra variables for the collections - useaa = 0, - lw = width, - tlw = tickwidth, - - # Calculate the barranges - def barrange(i, high, low): - return (i, low * scaling + bot), (i, high * scaling + bot) - - barranges = [barrange(i, high, low) for i, high, low in ihighlow()] - - barcol = mcol.LineCollection( - barranges, - colors=colors, - linewidths=lw, - antialiaseds=useaa, - label=label, - **kwargs) - - def tickopen(i, open): - open = open * scaling + bot - return (i - tickwidth, open), (i, open) - - openticks = [tickopen(i, open) for i, open in iopen()] - opencol = mcol.LineCollection( - openticks, - colors=colors, - antialiaseds=useaa, - linewidths=tlw, - label='_nolegend', - **kwargs) - - def tickclose(i, close): - close = close * scaling + bot - return (i, close), (i + tickwidth, close) - - closeticks = [tickclose(i, close) for i, close in iclose()] - closecol = mcol.LineCollection( - closeticks, - colors=colors, - antialiaseds=useaa, - linewidths=tlw, - label='_nolegend', - **kwargs) - - # return barcol, tickcol - return barcol, opencol, closecol - - -def plot_ohlc(ax, x, opens, highs, lows, closes, - colorup='k', colordown='r', - width=1.5, tickwidth=0.5, - alpha=1.0, - label='_nolegend', - **kwargs): - - handler = OHLCPlotHandler( - ax, x, opens, highs, lows, closes, - colorup, colordown, - width, tickwidth, - alpha, - label, - 
**kwargs) - - return handler.barcol, handler.opencol, handler.closecol - - -class LineOnClosePlotHandler(object): - legend_closes = [0.00, 0.66, 0.33, 1.00] - - def __init__(self, - ax, x, closes, color='k', - width=1, alpha=1.0, - label='_nolegend', - **kwargs): - - self.color = color - self.alpha = alpha - - self.loc, = self.barcollection( - x, closes, - width=width, - label=label, - **kwargs) - - # add collections to the axis and return them - ax.add_line(self.loc) - - # Update the axis - ax.update_datalim(((x[0], min(closes)), (x[-1], max(closes)))) - ax.autoscale_view() - - # Add self as legend handler for this object - mlegend.Legend.update_default_handler_map({self.loc: self}) - - def legend_artist(self, legend, orig_handle, fontsize, handlebox): - x0 = handlebox.xdescent - y0 = handlebox.ydescent - width = handlebox.width / len(self.legend_closes) - height = handlebox.height - - # Generate the x axis coordinates (handlebox based) - xs = [x0 + width * (i + 0.5) for i in range(len(self.legend_closes))] - - linecol, = self.barcollection( - xs, self.legend_closes, - width=1.5, - scaling=height, bot=y0) - - linecol.set_transform(handlebox.get_transform()) - handlebox.add_artist(linecol) - - return linecol, - - def barcollection(self, - xs, closes, - width, - label='_nolegend', - scaling=1.0, bot=0, - **kwargs): - - # Prepack different zips of the series values - scaled = [close * scaling + bot for close in closes] - - loc = mlines.Line2D( - xs, scaled, - color=self.color, - lw=width, - label=label, - alpha=self.alpha, - **kwargs) - - return loc, - - -def plot_lineonclose(ax, x, closes, - color='k', - width=1.5, - alpha=1.0, - label='_nolegend', - **kwargs): - - handler = LineOnClosePlotHandler( - ax, x, closes, - color=color, width=width, - alpha=alpha, label=label, - **kwargs) - - return handler.loc, diff --git a/spaces/LinkSoul/Chinese-LLaVa/static/css/index.css b/spaces/LinkSoul/Chinese-LLaVa/static/css/index.css deleted file mode 100644 index 21076ef552588e5831c9e503067762142cb7c9c0..0000000000000000000000000000000000000000 --- a/spaces/LinkSoul/Chinese-LLaVa/static/css/index.css +++ /dev/null @@ -1,157 +0,0 @@ -body { - font-family: 'Noto Sans', sans-serif; -} - - -.footer .icon-link { - font-size: 25px; - color: #000; -} - -.link-block a { - margin-top: 5px; - margin-bottom: 5px; -} - -.dnerf { - font-variant: small-caps; -} - - -.teaser .hero-body { - padding-top: 0; - padding-bottom: 3rem; -} - -.teaser { - font-family: 'Google Sans', sans-serif; -} - - -.publication-title { -} - -.publication-banner { - max-height: parent; - -} - -.publication-banner video { - position: relative; - left: auto; - top: auto; - transform: none; - object-fit: fit; -} - -.publication-header .hero-body { -} - -.publication-title { - font-family: 'Google Sans', sans-serif; -} - -.publication-authors { - font-family: 'Google Sans', sans-serif; -} - -.publication-venue { - color: #555; - width: fit-content; - font-weight: bold; -} - -.publication-awards { - color: #ff3860; - width: fit-content; - font-weight: bolder; -} - -.publication-authors { -} - -.publication-authors a { - color: hsl(204, 86%, 53%) !important; -} - -.publication-authors a:hover { - text-decoration: underline; -} - -.author-block { - display: inline-block; -} - -.publication-banner img { -} - -.publication-authors { - /*color: #4286f4;*/ -} - -.publication-video { - position: relative; - width: 100%; - height: 0; - padding-bottom: 56.25%; - - overflow: hidden; - border-radius: 10px !important; -} - -.publication-video iframe { - 
position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; -} - -.publication-body img { -} - -.results-carousel { - overflow: hidden; -} - -.results-carousel .item { - margin: 5px; - overflow: hidden; - border: 1px solid #bbb; - border-radius: 10px; - padding: 0; - font-size: 0; -} - -.results-carousel video { - margin: 0; -} - - -.interpolation-panel { - background: #f5f5f5; - border-radius: 10px; -} - -.interpolation-panel .interpolation-image { - width: 100%; - border-radius: 5px; -} - -.interpolation-video-column { -} - -.interpolation-panel .slider { - margin: 0 !important; -} - -.interpolation-panel .slider { - margin: 0 !important; -} - -#interpolation-image-wrapper { - width: 100%; -} -#interpolation-image-wrapper img { - border-radius: 5px; -} diff --git a/spaces/LuxOAI/zenFace-Recognition-SDK/run.sh b/spaces/LuxOAI/zenFace-Recognition-SDK/run.sh deleted file mode 100644 index f6ec105cddeb64569bb4669bf99897260d4753f2..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/zenFace-Recognition-SDK/run.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -exec python3 app.py & -exec python3 gradio/demo.py \ No newline at end of file diff --git a/spaces/Madhur-01/Question-Answering-system/app.py b/spaces/Madhur-01/Question-Answering-system/app.py deleted file mode 100644 index 443c72134d35d80689eef16c8632fd77ed4b3f21..0000000000000000000000000000000000000000 --- a/spaces/Madhur-01/Question-Answering-system/app.py +++ /dev/null @@ -1,25 +0,0 @@ -from transformers import pipeline -import streamlit as st - -# Load the question answering model -question_answerer = pipeline("question-answering", model="Madhur-01/my_awesome_qa_model") - -# Streamlit App -def main(): - st.title("Text-based Question Answering") - - # Input fields for context and question - context = st.text_area("Enter context paragraph:") - question = st.text_input("Enter question:") - - if st.button("Get Answer"): - if context and question: - # Get the answer from the model - answer = question_answerer(question=question, context=context) - st.write("Answer:", answer["answer"]) - else: - st.write("Please enter both context and question.") - -if __name__ == "__main__": - main() - diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/model/losses.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/model/losses.py deleted file mode 100644 index 60a2894b6f5b330aa4baa56db226e8a59cb8c1ae..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/model/losses.py +++ /dev/null @@ -1,68 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from collections import defaultdict - - -def dice_loss(input_mask, cls_gt): - num_objects = input_mask.shape[1] - losses = [] - for i in range(num_objects): - mask = input_mask[:,i].flatten(start_dim=1) - # background not in mask, so we add one to cls_gt - gt = (cls_gt==(i+1)).float().flatten(start_dim=1) - numerator = 2 * (mask * gt).sum(-1) - denominator = mask.sum(-1) + gt.sum(-1) - loss = 1 - (numerator + 1) / (denominator + 1) - losses.append(loss) - return torch.cat(losses).mean() - - -# https://stackoverflow.com/questions/63735255/how-do-i-compute-bootstrapped-cross-entropy-loss-in-pytorch -class BootstrappedCE(nn.Module): - def __init__(self, start_warm, end_warm, top_p=0.15): - super().__init__() - - self.start_warm = start_warm - self.end_warm = end_warm - self.top_p = top_p - - def 
forward(self, input, target, it): - if it < self.start_warm: - return F.cross_entropy(input, target), 1.0 - - raw_loss = F.cross_entropy(input, target, reduction='none').view(-1) - num_pixels = raw_loss.numel() - - if it > self.end_warm: - this_p = self.top_p - else: - this_p = self.top_p + (1-self.top_p)*((self.end_warm-it)/(self.end_warm-self.start_warm)) - loss, _ = torch.topk(raw_loss, int(num_pixels * this_p), sorted=False) - return loss.mean(), this_p - - -class LossComputer: - def __init__(self, config): - super().__init__() - self.config = config - self.bce = BootstrappedCE(config['start_warm'], config['end_warm']) - - def compute(self, data, num_objects, it): - losses = defaultdict(int) - - b, t = data['rgb'].shape[:2] - - losses['total_loss'] = 0 - for ti in range(1, t): - for bi in range(b): - loss, p = self.bce(data[f'logits_{ti}'][bi:bi+1, :num_objects[bi]+1], data['cls_gt'][bi:bi+1,ti,0], it) - losses['p'] += p / b / (t-1) - losses[f'ce_loss_{ti}'] += loss / b - - losses['total_loss'] += losses['ce_loss_%d'%ti] - losses[f'dice_loss_{ti}'] = dice_loss(data[f'masks_{ti}'], data['cls_gt'][:,ti,0]) - losses['total_loss'] += losses[f'dice_loss_{ti}'] - - return losses diff --git a/spaces/Manjushri/MusicGen/audiocraft/__init__.py b/spaces/Manjushri/MusicGen/audiocraft/__init__.py deleted file mode 100644 index 2befac60faf6f406f78ff7b7da05225dbfe7b111..0000000000000000000000000000000000000000 --- a/spaces/Manjushri/MusicGen/audiocraft/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from . import data, modules, models - -__version__ = '0.0.2a1' diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/optimizer/__init__.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/optimizer/__init__.py deleted file mode 100644 index 53c34d0470992cbc374f29681fdd00dc0e57968d..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/optimizer/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .builder import (OPTIMIZER_BUILDERS, OPTIMIZERS, build_optimizer, - build_optimizer_constructor) -from .default_constructor import DefaultOptimizerConstructor - -__all__ = [ - 'OPTIMIZER_BUILDERS', 'OPTIMIZERS', 'DefaultOptimizerConstructor', - 'build_optimizer', 'build_optimizer_constructor' -] diff --git a/spaces/MercurialAi/OncoMedleyMini/app.py b/spaces/MercurialAi/OncoMedleyMini/app.py deleted file mode 100644 index d13c0f4cd207796fab184c5eee8ab4c0dfe4fb3d..0000000000000000000000000000000000000000 --- a/spaces/MercurialAi/OncoMedleyMini/app.py +++ /dev/null @@ -1,65 +0,0 @@ -import gradio as gr -import os -os.system("pip -qq install torch") -os.system("pip -qq install pydicom") -os.system("pip -qq install torchio") -os.system("pip -qq install openai") -os.system("pip -qq install langchain") -os.system("pip -qq install scikit-learn") -os.system("pip -qq install torchvision") -import openai -import matplotlib.pyplot as plt -os.system("pip -qq install nltk") -from nltk.corpus import stopwords -import nltk -os.system('pip -qq install huggingface_hub["cli"]') - -from OncoMedley.src.clinical_only import clinical_only -from OncoMedley.chain import agent - -nltk.download('stopwords') -stop_words = set(stopwords.words('english')) - -EX_Q1 = "What is the x-coordinate of the tumor for patient ID 3? " -EX_Q2 = "get the tumor size of a breast cancer patient with tumor stage 2, Nottingham Prognostic Index value 6.044, 10 lymph nodes positive, a previous mastectomy, negative chemotherapy status, neoplasm histologic grade 3, negative PR status, 4ER integrative cluster, negative hormone therapy status, and positive ER status. " -EX_Q3 = "What are the optimal treatment options for patients with prior CDK4/6 inhibitor treatment and ESR1 wild-type tumors? " -EX_Q4 = "What testing must a patient candidate for poly ADP-ribose polymerase (PARP) inhibitor therapy undergo to determine their eligibility? " -EX_Q5 = "Should patient ID 23 receive adjuvant radiation therapy? 
" - -def get_response(Q): - # clear cache before generating new response - os.system('huggingface-cli delete-cache') - - Q = "Use a tool to answer this question: " + Q - - inference = agent.run(Q) - - return inference - -def bot(Q, history): - history = history or [] - c_history = list(sum(history, ())) - c_history.append(Q) - c_input = ' '.join(c_history) - output= get_response(c_input) - - history.append((Q, output)) - - return history, history - -def get_question_example(qe): - return qe - -with gr.Blocks() as iFace: - - chatbot = gr.Chatbot(show_label=False) - state = gr.State() - - Q = gr.Textbox(show_label=False, placeholder="I'm here to help.").style(container=False) - - question_example = gr.Radio(label="Inquiry Examples", choices=[EX_Q1, EX_Q2, EX_Q3, EX_Q4, EX_Q5]) - - Q.submit(bot, inputs=[Q, state], outputs=[chatbot, state]) - question_example.change(get_question_example, inputs=[question_example], outputs=Q) - -iFace.launch() diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/commands/write_tests.py b/spaces/MetaWabbit/Auto-GPT/autogpt/commands/write_tests.py deleted file mode 100644 index 35a086536c9d05d520a84b15ead49f775eacdcc9..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/autogpt/commands/write_tests.py +++ /dev/null @@ -1,31 +0,0 @@ -"""A module that contains a function to generate test cases for the submitted code.""" -from __future__ import annotations - -import json - -from autogpt.llm_utils import call_ai_function - - -def write_tests(code: str, focus: list[str]) -> str: - """ - A function that takes in code and focus topics and returns a response from create - chat completion api call. - - Parameters: - focus (list): A list of suggestions around what needs to be improved. - code (str): Code for test cases to be generated against. - Returns: - A result string from create chat completion. Test cases for the submitted code - in response. - """ - - function_string = ( - "def create_test_cases(code: str, focus: Optional[str] = None) -> str:" - ) - args = [code, json.dumps(focus)] - description_string = ( - "Generates test cases for the existing code, focusing on" - " specific areas if required." - ) - - return call_ai_function(function_string, args, description_string) diff --git a/spaces/MrD05/text-generation-webui-space/README.md b/spaces/MrD05/text-generation-webui-space/README.md deleted file mode 100644 index ff6758e9a243e97eb851b158660f692bc0e20e6f..0000000000000000000000000000000000000000 --- a/spaces/MrD05/text-generation-webui-space/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Text Generation Webui Space -emoji: 🏃 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.20.1 -app_file: run.py -pinned: false -license: mit -duplicated_from: dwolfe66/text-generation-webui-space ---- - -Check out this repo https://github.com/oobabooga/text-generation-webui diff --git a/spaces/MrYXJ/calculate-model-flops/__init__.py b/spaces/MrYXJ/calculate-model-flops/__init__.py deleted file mode 100644 index 3a0ca78a7a8f9c5c161d3162cde07b7e05371e6f..0000000000000000000000000000000000000000 --- a/spaces/MrYXJ/calculate-model-flops/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# !usr/bin/env python -# -*- coding:utf-8 -*- - -''' - Description : - Version : 1.0 - Author : MrYXJ - Mail : yxj2017@gmail.com - Github : https://github.com/MrYxJ - Date : 2023-09-05 23:26:00 - LastEditTime : 2023-09-05 23:26:02 - Copyright (C) 2023 mryxj. All rights reserved. 
-''' - - diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/visualization_utils.py b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/visualization_utils.py deleted file mode 100644 index db4af8089df673cd5c57c4a020b5d7e8f03846c9..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/visualization_utils.py +++ /dev/null @@ -1,733 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""A set of functions that are used for visualization. - -These functions often receive an image, perform some visualization on the image. -The functions do not return a value, instead they modify the image itself. - -""" -import collections -import functools -from absl import logging -# Set headless-friendly backend. -import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements -import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top -import numpy as np -import PIL.Image as Image -import PIL.ImageColor as ImageColor -import PIL.ImageDraw as ImageDraw -import PIL.ImageFont as ImageFont -import six -import tensorflow as tf - -from official.vision.detection.utils import box_utils -from official.vision.detection.utils.object_detection import shape_utils - - -_TITLE_LEFT_MARGIN = 10 -_TITLE_TOP_MARGIN = 10 -STANDARD_COLORS = [ - 'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque', - 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite', - 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan', - 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange', - 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet', - 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite', - 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod', - 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki', - 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue', - 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey', - 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue', - 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime', - 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid', - 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen', - 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin', - 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed', - 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed', - 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple', - 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown', - 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue', - 
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow', - 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White', - 'WhiteSmoke', 'Yellow', 'YellowGreen' -] - - -def save_image_array_as_png(image, output_path): - """Saves an image (represented as a numpy array) to PNG. - - Args: - image: a numpy array with shape [height, width, 3]. - output_path: path to which image should be written. - """ - image_pil = Image.fromarray(np.uint8(image)).convert('RGB') - with tf.io.gfile.GFile(output_path, 'w') as fid: - image_pil.save(fid, 'PNG') - - -def encode_image_array_as_png_str(image): - """Encodes a numpy array into a PNG string. - - Args: - image: a numpy array with shape [height, width, 3]. - - Returns: - PNG encoded image string. - """ - image_pil = Image.fromarray(np.uint8(image)) - output = six.BytesIO() - image_pil.save(output, format='PNG') - png_string = output.getvalue() - output.close() - return png_string - - -def visualize_images_with_bounding_boxes(images, box_outputs, step, - summary_writer): - """Records subset of evaluation images with bounding boxes.""" - if not isinstance(images, list): - logging.warning('visualize_images_with_bounding_boxes expects list of ' - 'images but received type: %s and value: %s', - type(images), images) - return - - image_shape = tf.shape(images[0]) - image_height = tf.cast(image_shape[0], tf.float32) - image_width = tf.cast(image_shape[1], tf.float32) - normalized_boxes = box_utils.normalize_boxes(box_outputs, - [image_height, image_width]) - - bounding_box_color = tf.constant([[1.0, 1.0, 0.0, 1.0]]) - image_summary = tf.image.draw_bounding_boxes( - tf.cast(images, tf.float32), normalized_boxes, bounding_box_color) - with summary_writer.as_default(): - tf.summary.image('bounding_box_summary', image_summary, step=step) - summary_writer.flush() - - -def draw_bounding_box_on_image_array(image, - ymin, - xmin, - ymax, - xmax, - color='red', - thickness=4, - display_str_list=(), - use_normalized_coordinates=True): - """Adds a bounding box to an image (numpy array). - - Bounding box coordinates can be specified in either absolute (pixel) or - normalized coordinates by setting the use_normalized_coordinates argument. - - Args: - image: a numpy array with shape [height, width, 3]. - ymin: ymin of bounding box. - xmin: xmin of bounding box. - ymax: ymax of bounding box. - xmax: xmax of bounding box. - color: color to draw bounding box. Default is red. - thickness: line thickness. Default value is 4. - display_str_list: list of strings to display in box - (each to be shown on its own line). - use_normalized_coordinates: If True (default), treat coordinates - ymin, xmin, ymax, xmax as relative to the image. Otherwise treat - coordinates as absolute. - """ - image_pil = Image.fromarray(np.uint8(image)).convert('RGB') - draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color, - thickness, display_str_list, - use_normalized_coordinates) - np.copyto(image, np.array(image_pil)) - - -def draw_bounding_box_on_image(image, - ymin, - xmin, - ymax, - xmax, - color='red', - thickness=4, - display_str_list=(), - use_normalized_coordinates=True): - """Adds a bounding box to an image. - - Bounding box coordinates can be specified in either absolute (pixel) or - normalized coordinates by setting the use_normalized_coordinates argument. - - Each string in display_str_list is displayed on a separate line above the - bounding box in black text on a rectangle filled with the input 'color'. 
- If the top of the bounding box extends to the edge of the image, the strings - are displayed below the bounding box. - - Args: - image: a PIL.Image object. - ymin: ymin of bounding box. - xmin: xmin of bounding box. - ymax: ymax of bounding box. - xmax: xmax of bounding box. - color: color to draw bounding box. Default is red. - thickness: line thickness. Default value is 4. - display_str_list: list of strings to display in box - (each to be shown on its own line). - use_normalized_coordinates: If True (default), treat coordinates - ymin, xmin, ymax, xmax as relative to the image. Otherwise treat - coordinates as absolute. - """ - draw = ImageDraw.Draw(image) - im_width, im_height = image.size - if use_normalized_coordinates: - (left, right, top, bottom) = (xmin * im_width, xmax * im_width, - ymin * im_height, ymax * im_height) - else: - (left, right, top, bottom) = (xmin, xmax, ymin, ymax) - draw.line([(left, top), (left, bottom), (right, bottom), - (right, top), (left, top)], width=thickness, fill=color) - try: - font = ImageFont.truetype('arial.ttf', 24) - except IOError: - font = ImageFont.load_default() - - # If the total height of the display strings added to the top of the bounding - # box exceeds the top of the image, stack the strings below the bounding box - # instead of above. - display_str_heights = [font.getsize(ds)[1] for ds in display_str_list] - # Each display_str has a top and bottom margin of 0.05x. - total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights) - - if top > total_display_str_height: - text_bottom = top - else: - text_bottom = bottom + total_display_str_height - # Reverse list and print from bottom to top. - for display_str in display_str_list[::-1]: - text_width, text_height = font.getsize(display_str) - margin = np.ceil(0.05 * text_height) - draw.rectangle( - [(left, text_bottom - text_height - 2 * margin), (left + text_width, - text_bottom)], - fill=color) - draw.text( - (left + margin, text_bottom - text_height - margin), - display_str, - fill='black', - font=font) - text_bottom -= text_height - 2 * margin - - -def draw_bounding_boxes_on_image_array(image, - boxes, - color='red', - thickness=4, - display_str_list_list=()): - """Draws bounding boxes on image (numpy array). - - Args: - image: a numpy array object. - boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). - The coordinates are in normalized format between [0, 1]. - color: color to draw bounding box. Default is red. - thickness: line thickness. Default value is 4. - display_str_list_list: list of list of strings. - a list of strings for each bounding box. - The reason to pass a list of strings for a - bounding box is that it might contain - multiple labels. - - Raises: - ValueError: if boxes is not a [N, 4] array - """ - image_pil = Image.fromarray(image) - draw_bounding_boxes_on_image(image_pil, boxes, color, thickness, - display_str_list_list) - np.copyto(image, np.array(image_pil)) - - -def draw_bounding_boxes_on_image(image, - boxes, - color='red', - thickness=4, - display_str_list_list=()): - """Draws bounding boxes on image. - - Args: - image: a PIL.Image object. - boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). - The coordinates are in normalized format between [0, 1]. - color: color to draw bounding box. Default is red. - thickness: line thickness. Default value is 4. - display_str_list_list: list of list of strings. - a list of strings for each bounding box. 
- The reason to pass a list of strings for a - bounding box is that it might contain - multiple labels. - - Raises: - ValueError: if boxes is not a [N, 4] array - """ - boxes_shape = boxes.shape - if not boxes_shape: - return - if len(boxes_shape) != 2 or boxes_shape[1] != 4: - raise ValueError('Input must be of size [N, 4]') - for i in range(boxes_shape[0]): - display_str_list = () - if display_str_list_list: - display_str_list = display_str_list_list[i] - draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2], - boxes[i, 3], color, thickness, display_str_list) - - -def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs): - return visualize_boxes_and_labels_on_image_array( - image, boxes, classes, scores, category_index=category_index, **kwargs) - - -def _visualize_boxes_and_masks(image, boxes, classes, scores, masks, - category_index, **kwargs): - return visualize_boxes_and_labels_on_image_array( - image, - boxes, - classes, - scores, - category_index=category_index, - instance_masks=masks, - **kwargs) - - -def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints, - category_index, **kwargs): - return visualize_boxes_and_labels_on_image_array( - image, - boxes, - classes, - scores, - category_index=category_index, - keypoints=keypoints, - **kwargs) - - -def _visualize_boxes_and_masks_and_keypoints( - image, boxes, classes, scores, masks, keypoints, category_index, **kwargs): - return visualize_boxes_and_labels_on_image_array( - image, - boxes, - classes, - scores, - category_index=category_index, - instance_masks=masks, - keypoints=keypoints, - **kwargs) - - -def _resize_original_image(image, image_shape): - image = tf.expand_dims(image, 0) - image = tf.image.resize( - image, image_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) - return tf.cast(tf.squeeze(image, 0), tf.uint8) - - -def draw_bounding_boxes_on_image_tensors(images, - boxes, - classes, - scores, - category_index, - original_image_spatial_shape=None, - true_image_shape=None, - instance_masks=None, - keypoints=None, - max_boxes_to_draw=20, - min_score_thresh=0.2, - use_normalized_coordinates=True): - """Draws bounding boxes, masks, and keypoints on batch of image tensors. - - Args: - images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional - channels will be ignored. If C = 1, then we convert the images to RGB - images. - boxes: [N, max_detections, 4] float32 tensor of detection boxes. - classes: [N, max_detections] int tensor of detection classes. Note that - classes are 1-indexed. - scores: [N, max_detections] float32 tensor of detection scores. - category_index: a dict that maps integer ids to category dicts. e.g. - {1: {1: 'dog'}, 2: {2: 'cat'}, ...} - original_image_spatial_shape: [N, 2] tensor containing the spatial size of - the original image. - true_image_shape: [N, 3] tensor containing the spatial size of unpadded - original_image. - instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with - instance masks. - keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2] - with keypoints. - max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20. - min_score_thresh: Minimum score threshold for visualization. Default 0.2. - use_normalized_coordinates: Whether to assume boxes and kepoints are in - normalized coordinates (as opposed to absolute coordiantes). - Default is True. - - Returns: - 4D image tensor of type uint8, with boxes drawn on top. 
- """ - # Additional channels are being ignored. - if images.shape[3] > 3: - images = images[:, :, :, 0:3] - elif images.shape[3] == 1: - images = tf.image.grayscale_to_rgb(images) - visualization_keyword_args = { - 'use_normalized_coordinates': use_normalized_coordinates, - 'max_boxes_to_draw': max_boxes_to_draw, - 'min_score_thresh': min_score_thresh, - 'agnostic_mode': False, - 'line_thickness': 4 - } - if true_image_shape is None: - true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3]) - else: - true_shapes = true_image_shape - if original_image_spatial_shape is None: - original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2]) - else: - original_shapes = original_image_spatial_shape - - if instance_masks is not None and keypoints is None: - visualize_boxes_fn = functools.partial( - _visualize_boxes_and_masks, - category_index=category_index, - **visualization_keyword_args) - elems = [ - true_shapes, original_shapes, images, boxes, classes, scores, - instance_masks - ] - elif instance_masks is None and keypoints is not None: - visualize_boxes_fn = functools.partial( - _visualize_boxes_and_keypoints, - category_index=category_index, - **visualization_keyword_args) - elems = [ - true_shapes, original_shapes, images, boxes, classes, scores, keypoints - ] - elif instance_masks is not None and keypoints is not None: - visualize_boxes_fn = functools.partial( - _visualize_boxes_and_masks_and_keypoints, - category_index=category_index, - **visualization_keyword_args) - elems = [ - true_shapes, original_shapes, images, boxes, classes, scores, - instance_masks, keypoints - ] - else: - visualize_boxes_fn = functools.partial( - _visualize_boxes, - category_index=category_index, - **visualization_keyword_args) - elems = [ - true_shapes, original_shapes, images, boxes, classes, scores - ] - - def draw_boxes(image_and_detections): - """Draws boxes on image.""" - true_shape = image_and_detections[0] - original_shape = image_and_detections[1] - if true_image_shape is not None: - image = shape_utils.pad_or_clip_nd( - image_and_detections[2], [true_shape[0], true_shape[1], 3]) - if original_image_spatial_shape is not None: - image_and_detections[2] = _resize_original_image(image, original_shape) - - image_with_boxes = tf.compat.v1.py_func(visualize_boxes_fn, - image_and_detections[2:], tf.uint8) - return image_with_boxes - - images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False) - return images - - -def draw_keypoints_on_image_array(image, - keypoints, - color='red', - radius=2, - use_normalized_coordinates=True): - """Draws keypoints on an image (numpy array). - - Args: - image: a numpy array with shape [height, width, 3]. - keypoints: a numpy array with shape [num_keypoints, 2]. - color: color to draw the keypoints with. Default is red. - radius: keypoint radius. Default value is 2. - use_normalized_coordinates: if True (default), treat keypoint values as - relative to the image. Otherwise treat them as absolute. - """ - image_pil = Image.fromarray(np.uint8(image)).convert('RGB') - draw_keypoints_on_image(image_pil, keypoints, color, radius, - use_normalized_coordinates) - np.copyto(image, np.array(image_pil)) - - -def draw_keypoints_on_image(image, - keypoints, - color='red', - radius=2, - use_normalized_coordinates=True): - """Draws keypoints on an image. - - Args: - image: a PIL.Image object. - keypoints: a numpy array with shape [num_keypoints, 2]. - color: color to draw the keypoints with. Default is red. - radius: keypoint radius. Default value is 2. 
- use_normalized_coordinates: if True (default), treat keypoint values as - relative to the image. Otherwise treat them as absolute. - """ - draw = ImageDraw.Draw(image) - im_width, im_height = image.size - keypoints_x = [k[1] for k in keypoints] - keypoints_y = [k[0] for k in keypoints] - if use_normalized_coordinates: - keypoints_x = tuple([im_width * x for x in keypoints_x]) - keypoints_y = tuple([im_height * y for y in keypoints_y]) - for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y): - draw.ellipse([(keypoint_x - radius, keypoint_y - radius), - (keypoint_x + radius, keypoint_y + radius)], - outline=color, fill=color) - - -def draw_mask_on_image_array(image, mask, color='red', alpha=0.4): - """Draws mask on an image. - - Args: - image: uint8 numpy array with shape (img_height, img_height, 3) - mask: a uint8 numpy array of shape (img_height, img_height) with - values between either 0 or 1. - color: color to draw the keypoints with. Default is red. - alpha: transparency value between 0 and 1. (default: 0.4) - - Raises: - ValueError: On incorrect data type for image or masks. - """ - if image.dtype != np.uint8: - raise ValueError('`image` not of type np.uint8') - if mask.dtype != np.uint8: - raise ValueError('`mask` not of type np.uint8') - if np.any(np.logical_and(mask != 1, mask != 0)): - raise ValueError('`mask` elements should be in [0, 1]') - if image.shape[:2] != mask.shape: - raise ValueError('The image has spatial dimensions %s but the mask has ' - 'dimensions %s' % (image.shape[:2], mask.shape)) - rgb = ImageColor.getrgb(color) - pil_image = Image.fromarray(image) - - solid_color = np.expand_dims( - np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3]) - pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA') - pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L') - pil_image = Image.composite(pil_solid_color, pil_image, pil_mask) - np.copyto(image, np.array(pil_image.convert('RGB'))) - - -def visualize_boxes_and_labels_on_image_array( - image, - boxes, - classes, - scores, - category_index, - instance_masks=None, - instance_boundaries=None, - keypoints=None, - use_normalized_coordinates=False, - max_boxes_to_draw=20, - min_score_thresh=.5, - agnostic_mode=False, - line_thickness=4, - groundtruth_box_visualization_color='black', - skip_scores=False, - skip_labels=False): - """Overlay labeled boxes on an image with formatted scores and label names. - - This function groups boxes that correspond to the same location - and creates a display string for each detection and overlays these - on the image. Note that this function modifies the image in place, and returns - that same image. - - Args: - image: uint8 numpy array with shape (img_height, img_width, 3) - boxes: a numpy array of shape [N, 4] - classes: a numpy array of shape [N]. Note that class indices are 1-based, - and match the keys in the label map. - scores: a numpy array of shape [N] or None. If scores=None, then - this function assumes that the boxes to be plotted are groundtruth - boxes and plot all boxes as black with no classes or scores. - category_index: a dict containing category dictionaries (each holding - category index `id` and category name `name`) keyed by category indices. - instance_masks: a numpy array of shape [N, image_height, image_width] with - values ranging between 0 and 1, can be None. - instance_boundaries: a numpy array of shape [N, image_height, image_width] - with values ranging between 0 and 1, can be None. 
- keypoints: a numpy array of shape [N, num_keypoints, 2], can - be None - use_normalized_coordinates: whether boxes is to be interpreted as - normalized coordinates or not. - max_boxes_to_draw: maximum number of boxes to visualize. If None, draw - all boxes. - min_score_thresh: minimum score threshold for a box to be visualized - agnostic_mode: boolean (default: False) controlling whether to evaluate in - class-agnostic mode or not. This mode will display scores but ignore - classes. - line_thickness: integer (default: 4) controlling line width of the boxes. - groundtruth_box_visualization_color: box color for visualizing groundtruth - boxes - skip_scores: whether to skip score when drawing a single detection - skip_labels: whether to skip label when drawing a single detection - - Returns: - uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes. - """ - # Create a display string (and color) for every box location, group any boxes - # that correspond to the same location. - box_to_display_str_map = collections.defaultdict(list) - box_to_color_map = collections.defaultdict(str) - box_to_instance_masks_map = {} - box_to_instance_boundaries_map = {} - box_to_keypoints_map = collections.defaultdict(list) - if not max_boxes_to_draw: - max_boxes_to_draw = boxes.shape[0] - for i in range(min(max_boxes_to_draw, boxes.shape[0])): - if scores is None or scores[i] > min_score_thresh: - box = tuple(boxes[i].tolist()) - if instance_masks is not None: - box_to_instance_masks_map[box] = instance_masks[i] - if instance_boundaries is not None: - box_to_instance_boundaries_map[box] = instance_boundaries[i] - if keypoints is not None: - box_to_keypoints_map[box].extend(keypoints[i]) - if scores is None: - box_to_color_map[box] = groundtruth_box_visualization_color - else: - display_str = '' - if not skip_labels: - if not agnostic_mode: - if classes[i] in category_index.keys(): - class_name = category_index[classes[i]]['name'] - else: - class_name = 'N/A' - display_str = str(class_name) - if not skip_scores: - if not display_str: - display_str = '{}%'.format(int(100*scores[i])) - else: - display_str = '{}: {}%'.format(display_str, int(100*scores[i])) - box_to_display_str_map[box].append(display_str) - if agnostic_mode: - box_to_color_map[box] = 'DarkOrange' - else: - box_to_color_map[box] = STANDARD_COLORS[ - classes[i] % len(STANDARD_COLORS)] - - # Draw all boxes onto image. - for box, color in box_to_color_map.items(): - ymin, xmin, ymax, xmax = box - if instance_masks is not None: - draw_mask_on_image_array( - image, - box_to_instance_masks_map[box], - color=color - ) - if instance_boundaries is not None: - draw_mask_on_image_array( - image, - box_to_instance_boundaries_map[box], - color='red', - alpha=1.0 - ) - draw_bounding_box_on_image_array( - image, - ymin, - xmin, - ymax, - xmax, - color=color, - thickness=line_thickness, - display_str_list=box_to_display_str_map[box], - use_normalized_coordinates=use_normalized_coordinates) - if keypoints is not None: - draw_keypoints_on_image_array( - image, - box_to_keypoints_map[box], - color=color, - radius=line_thickness / 2, - use_normalized_coordinates=use_normalized_coordinates) - - return image - - -def add_cdf_image_summary(values, name): - """Adds a tf.summary.image for a CDF plot of the values. - - Normalizes `values` such that they sum to 1, plots the cumulative distribution - function and creates a tf image summary. - - Args: - values: a 1-D float32 tensor containing the values. - name: name for the image summary. 
- """ - def cdf_plot(values): - """Numpy function to plot CDF.""" - normalized_values = values / np.sum(values) - sorted_values = np.sort(normalized_values) - cumulative_values = np.cumsum(sorted_values) - fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32) - / cumulative_values.size) - fig = plt.figure(frameon=False) - ax = fig.add_subplot('111') - ax.plot(fraction_of_examples, cumulative_values) - ax.set_ylabel('cumulative normalized values') - ax.set_xlabel('fraction of examples') - fig.canvas.draw() - width, height = fig.get_size_inches() * fig.get_dpi() - image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape( - 1, int(height), int(width), 3) - return image - - cdf_plot = tf.compat.v1.py_func(cdf_plot, [values], tf.uint8) - tf.compat.v1.summary.image(name, cdf_plot) - - -def add_hist_image_summary(values, bins, name): - """Adds a tf.summary.image for a histogram plot of the values. - - Plots the histogram of values and creates a tf image summary. - - Args: - values: a 1-D float32 tensor containing the values. - bins: bin edges which will be directly passed to np.histogram. - name: name for the image summary. - """ - - def hist_plot(values, bins): - """Numpy function to plot hist.""" - fig = plt.figure(frameon=False) - ax = fig.add_subplot('111') - y, x = np.histogram(values, bins=bins) - ax.plot(x[:-1], y) - ax.set_ylabel('count') - ax.set_xlabel('value') - fig.canvas.draw() - width, height = fig.get_size_inches() * fig.get_dpi() - image = np.fromstring( - fig.canvas.tostring_rgb(), dtype='uint8').reshape( - 1, int(height), int(width), 3) - return image - - hist_plot = tf.compat.v1.py_func(hist_plot, [values, bins], tf.uint8) - tf.compat.v1.summary.image(name, hist_plot) diff --git a/spaces/NCTCMumbai/NCTC/models/research/compression/entropy_coder/all_models/all_models_test.py b/spaces/NCTCMumbai/NCTC/models/research/compression/entropy_coder/all_models/all_models_test.py deleted file mode 100644 index b8aff504a0a00d579d1b2768164b78b6c095b235..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/compression/entropy_coder/all_models/all_models_test.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Basic test of all registered models.""" - -import tensorflow as tf - -# pylint: disable=unused-import -import all_models -# pylint: enable=unused-import -from entropy_coder.model import model_factory - - -class AllModelsTest(tf.test.TestCase): - - def testBuildModelForTraining(self): - factory = model_factory.GetModelRegistry() - model_names = factory.GetAvailableModels() - - for m in model_names: - tf.reset_default_graph() - - global_step = tf.Variable(tf.zeros([], dtype=tf.int64), - trainable=False, - name='global_step') - - optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1) - - batch_size = 3 - height = 40 - width = 20 - depth = 5 - binary_codes = tf.placeholder(dtype=tf.float32, - shape=[batch_size, height, width, depth]) - - # Create a model with the default configuration. - print('Creating model: {}'.format(m)) - model = factory.CreateModel(m) - model.Initialize(global_step, - optimizer, - model.GetConfigStringForUnitTest()) - self.assertTrue(model.loss is None, 'model: {}'.format(m)) - self.assertTrue(model.train_op is None, 'model: {}'.format(m)) - self.assertTrue(model.average_code_length is None, 'model: {}'.format(m)) - - # Build the Tensorflow graph corresponding to the model. - model.BuildGraph(binary_codes) - self.assertTrue(model.loss is not None, 'model: {}'.format(m)) - self.assertTrue(model.average_code_length is not None, - 'model: {}'.format(m)) - if model.train_op is None: - print('Model {} is not trainable'.format(m)) - - -if __name__ == '__main__': - tf.test.main() diff --git a/spaces/NbAiLab/maken-clip-text/app.py b/spaces/NbAiLab/maken-clip-text/app.py deleted file mode 100644 index 1ed683a3357cd50612c6589b42983490bf6c6118..0000000000000000000000000000000000000000 --- a/spaces/NbAiLab/maken-clip-text/app.py +++ /dev/null @@ -1,120 +0,0 @@ -import os - -from pathlib import Path -import pandas as pd, numpy as np -from transformers import CLIPProcessor, CLIPTextModel, CLIPModel -import torch -from torch import nn -import gradio as gr -import requests -from PIL import Image, ImageFile -ImageFile.LOAD_TRUNCATED_IMAGES = True - - -LABELS = Path('class_names.txt').read_text().splitlines() -class_model = nn.Sequential( - nn.Conv2d(1, 32, 3, padding='same'), - nn.ReLU(), - nn.MaxPool2d(2), - nn.Conv2d(32, 64, 3, padding='same'), - nn.ReLU(), - nn.MaxPool2d(2), - nn.Conv2d(64, 128, 3, padding='same'), - nn.ReLU(), - nn.MaxPool2d(2), - nn.Flatten(), - nn.Linear(1152, 256), - nn.ReLU(), - nn.Linear(256, len(LABELS)), -) -state_dict = torch.load('pytorch_model.bin', map_location='cpu') -class_model.load_state_dict(state_dict, strict=False) -class_model.eval() - - -model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") -processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") -df = pd.read_csv('clip.csv') -embeddings_npy = np.load('clip.npy') -embeddings = np.divide(embeddings_npy, np.sqrt(np.sum(embeddings_npy**2, axis=1, keepdims=True))) - - -def compute_text_embeddings(list_of_strings): - inputs = processor(text=list_of_strings, return_tensors="pt", padding=True) - return model.get_text_features(**inputs) - - -def compute_image_embeddings(list_of_images): - inputs = processor(images=list_of_images, return_tensors="pt", padding=True) - return model.get_image_features(**inputs) - - -def load_image(image, same_height=False): - # im = Image.open(path) - im = Image.fromarray(np.uint8(image)) - if im.mode != 'RGB': - im = im.convert('RGB') - if 
same_height: - ratio = 224/im.size[1] - return im.resize((int(im.size[0]*ratio), int(im.size[1]*ratio))) - else: - ratio = 224/min(im.size) - return im.resize((int(im.size[0]*ratio), int(im.size[1]*ratio))) - - -def download_img(identifier, url): - local_path = f"{identifier}.jpg" - if not os.path.isfile(local_path): - img_data = requests.get(url).content - with open(local_path, 'wb') as handler: - handler.write(img_data) - return local_path - - -def predict(image=None, text=None, sketch=None): - if image is not None: - input_embeddings = compute_image_embeddings([load_image(image)]).detach().numpy() - topk = {"local": 100} - else: - if text: - query = text - topk = {text: 100} - else: - x = torch.tensor(sketch, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255. - with torch.no_grad(): - out = class_model(x) - probabilities = torch.nn.functional.softmax(out[0], dim=0) - values, indices = torch.topk(probabilities, 5) - query = LABELS[indices[0]] - topk = {LABELS[i]: v.item() for i, v in zip(indices, values)} - input_embeddings = compute_text_embeddings([query]).detach().numpy() - - n_results = 3 - results = np.argsort((embeddings @ input_embeddings.T)[:, 0])[-1:-n_results - 1:-1] - outputs = [download_img(df.iloc[i]['id'], df.iloc[i]['thumbnail']) for i in results] - outputs.insert(0, topk) - print(outputs) - return outputs - - -def predict_text(text): - return predict(None, text, None) - - -title = "Type to search in the Nasjonalbiblioteket" -description = "Find images in the Nasjonalbiblioteket image collections based on what you type" -interface = gr.Interface( - fn=predict_text, - inputs=["text"], - outputs=[gr.outputs.Label(num_top_classes=3), gr.outputs.Image(type="filepath"), gr.outputs.Image(type="filepath"), gr.outputs.Image(type="filepath")], - title=title, - description=description, - #live=True, - examples=[ - ["kids playing in the snow"], - ["walking in the dark"], - ["woman sitting on a chair while drinking a beer"], - ["nice view out the window on a train"], - ], -) -interface.launch(debug=True) diff --git a/spaces/NimaBoscarino/climategan/climategan/blocks.py b/spaces/NimaBoscarino/climategan/climategan/blocks.py deleted file mode 100644 index 8cab41f528d47c859fd17167092aabd6bcc359cd..0000000000000000000000000000000000000000 --- a/spaces/NimaBoscarino/climategan/climategan/blocks.py +++ /dev/null @@ -1,398 +0,0 @@ -"""File for all blocks which are parts of decoders -""" -import torch -import torch.nn as nn -import torch.nn.functional as F - -import climategan.strings as strings -from climategan.norms import SPADE, AdaptiveInstanceNorm2d, LayerNorm, SpectralNorm - - -class InterpolateNearest2d(nn.Module): - """ - Custom implementation of nn.Upsample because pytorch/xla - does not yet support scale_factor and needs to be provided with - the output_size - """ - - def __init__(self, scale_factor=2): - """ - Create an InterpolateNearest2d module - - Args: - scale_factor (int, optional): Output size multiplier. Defaults to 2. 
- """ - super().__init__() - self.scale_factor = scale_factor - - def forward(self, x): - """ - Interpolate x in "nearest" mode on its last 2 dimensions - - Args: - x (torch.Tensor): input to interpolate - - Returns: - torch.Tensor: upsampled tensor with shape - (...x.shape, x.shape[-2] * scale_factor, x.shape[-1] * scale_factor) - """ - return F.interpolate( - x, - size=(x.shape[-2] * self.scale_factor, x.shape[-1] * self.scale_factor), - mode="nearest", - ) - - -# ----------------------------------------- -# ----- Generic Convolutional Block ----- -# ----------------------------------------- -class Conv2dBlock(nn.Module): - def __init__( - self, - input_dim, - output_dim, - kernel_size, - stride=1, - padding=0, - dilation=1, - norm="none", - activation="relu", - pad_type="zero", - bias=True, - ): - super().__init__() - self.use_bias = bias - # initialize padding - if pad_type == "reflect": - self.pad = nn.ReflectionPad2d(padding) - elif pad_type == "replicate": - self.pad = nn.ReplicationPad2d(padding) - elif pad_type == "zero": - self.pad = nn.ZeroPad2d(padding) - else: - assert 0, "Unsupported padding type: {}".format(pad_type) - - # initialize normalization - use_spectral_norm = False - if norm.startswith("spectral_"): - norm = norm.replace("spectral_", "") - use_spectral_norm = True - - norm_dim = output_dim - if norm == "batch": - self.norm = nn.BatchNorm2d(norm_dim) - elif norm == "instance": - # self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) - self.norm = nn.InstanceNorm2d(norm_dim) - elif norm == "layer": - self.norm = LayerNorm(norm_dim) - elif norm == "adain": - self.norm = AdaptiveInstanceNorm2d(norm_dim) - elif norm == "spectral" or norm.startswith("spectral_"): - self.norm = None # dealt with later in the code - elif norm == "none": - self.norm = None - else: - raise ValueError("Unsupported normalization: {}".format(norm)) - - # initialize activation - if activation == "relu": - self.activation = nn.ReLU(inplace=False) - elif activation == "lrelu": - self.activation = nn.LeakyReLU(0.2, inplace=False) - elif activation == "prelu": - self.activation = nn.PReLU() - elif activation == "selu": - self.activation = nn.SELU(inplace=False) - elif activation == "tanh": - self.activation = nn.Tanh() - elif activation == "sigmoid": - self.activation = nn.Sigmoid() - elif activation == "none": - self.activation = None - else: - raise ValueError("Unsupported activation: {}".format(activation)) - - # initialize convolution - if norm == "spectral" or use_spectral_norm: - self.conv = SpectralNorm( - nn.Conv2d( - input_dim, - output_dim, - kernel_size, - stride, - dilation=dilation, - bias=self.use_bias, - ) - ) - else: - self.conv = nn.Conv2d( - input_dim, - output_dim, - kernel_size, - stride, - dilation=dilation, - bias=self.use_bias if norm != "batch" else False, - ) - - def forward(self, x): - x = self.conv(self.pad(x)) - if self.norm is not None: - x = self.norm(x) - if self.activation is not None: - x = self.activation(x) - return x - - def __str__(self): - return strings.conv2dblock(self) - - -# ----------------------------- -# ----- Residual Blocks ----- -# ----------------------------- -class ResBlocks(nn.Module): - """ - From https://github.com/NVlabs/MUNIT/blob/master/networks.py - """ - - def __init__(self, num_blocks, dim, norm="in", activation="relu", pad_type="zero"): - super().__init__() - self.model = nn.Sequential( - *[ - ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type) - for _ in range(num_blocks) - ] - ) - - def forward(self, x): - 
return self.model(x) - - def __str__(self): - return strings.resblocks(self) - - -class ResBlock(nn.Module): - def __init__(self, dim, norm="in", activation="relu", pad_type="zero"): - super().__init__() - self.dim = dim - self.norm = norm - self.activation = activation - model = [] - model += [ - Conv2dBlock( - dim, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type - ) - ] - model += [ - Conv2dBlock( - dim, dim, 3, 1, 1, norm=norm, activation="none", pad_type=pad_type - ) - ] - self.model = nn.Sequential(*model) - - def forward(self, x): - residual = x - out = self.model(x) - out += residual - return out - - def __str__(self): - return strings.resblock(self) - - -# -------------------------- -# ----- Base Decoder ----- -# -------------------------- -class BaseDecoder(nn.Module): - def __init__( - self, - n_upsample=4, - n_res=4, - input_dim=2048, - proj_dim=64, - output_dim=3, - norm="batch", - activ="relu", - pad_type="zero", - output_activ="tanh", - low_level_feats_dim=-1, - use_dada=False, - ): - super().__init__() - - self.low_level_feats_dim = low_level_feats_dim - self.use_dada = use_dada - - self.model = [] - if proj_dim != -1: - self.proj_conv = Conv2dBlock( - input_dim, proj_dim, 1, 1, 0, norm=norm, activation=activ - ) - else: - self.proj_conv = None - proj_dim = input_dim - - if low_level_feats_dim > 0: - self.low_level_conv = Conv2dBlock( - input_dim=low_level_feats_dim, - output_dim=proj_dim, - kernel_size=3, - stride=1, - padding=1, - pad_type=pad_type, - norm=norm, - activation=activ, - ) - self.merge_feats_conv = Conv2dBlock( - input_dim=2 * proj_dim, - output_dim=proj_dim, - kernel_size=1, - stride=1, - padding=0, - pad_type=pad_type, - norm=norm, - activation=activ, - ) - else: - self.low_level_conv = None - - self.model += [ResBlocks(n_res, proj_dim, norm, activ, pad_type=pad_type)] - dim = proj_dim - # upsampling blocks - for i in range(n_upsample): - self.model += [ - InterpolateNearest2d(scale_factor=2), - Conv2dBlock( - input_dim=dim, - output_dim=dim // 2, - kernel_size=3, - stride=1, - padding=1, - pad_type=pad_type, - norm=norm, - activation=activ, - ), - ] - dim //= 2 - # use reflection padding in the last conv layer - self.model += [ - Conv2dBlock( - input_dim=dim, - output_dim=output_dim, - kernel_size=3, - stride=1, - padding=1, - pad_type=pad_type, - norm="none", - activation=output_activ, - ) - ] - self.model = nn.Sequential(*self.model) - - def forward(self, z, cond=None, z_depth=None): - low_level_feat = None - if isinstance(z, (list, tuple)): - if self.low_level_conv is None: - z = z[0] - else: - z, low_level_feat = z - low_level_feat = self.low_level_conv(low_level_feat) - low_level_feat = F.interpolate( - low_level_feat, size=z.shape[-2:], mode="bilinear" - ) - - if z_depth is not None and self.use_dada: - z = z * z_depth - - if self.proj_conv is not None: - z = self.proj_conv(z) - - if low_level_feat is not None: - z = self.merge_feats_conv(torch.cat([low_level_feat, z], dim=1)) - - return self.model(z) - - def __str__(self): - return strings.basedecoder(self) - - -# -------------------------- -# ----- SPADE Blocks ----- -# -------------------------- -# https://github.com/NVlabs/SPADE/blob/0ff661e70131c9b85091d11a66e019c0f2062d4c -# /models/networks/generator.py -# 0ff661e on 13 Apr 2019 -class SPADEResnetBlock(nn.Module): - def __init__( - self, - fin, - fout, - cond_nc, - spade_use_spectral_norm, - spade_param_free_norm, - spade_kernel_size, - last_activation=None, - ): - super().__init__() - # Attributes - - self.fin = fin - 
self.fout = fout - self.use_spectral_norm = spade_use_spectral_norm - self.param_free_norm = spade_param_free_norm - self.kernel_size = spade_kernel_size - - self.learned_shortcut = fin != fout - self.last_activation = last_activation - fmiddle = min(fin, fout) - - # create conv layers - self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1) - self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1) - if self.learned_shortcut: - self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False) - - # apply spectral norm if specified - if spade_use_spectral_norm: - self.conv_0 = SpectralNorm(self.conv_0) - self.conv_1 = SpectralNorm(self.conv_1) - if self.learned_shortcut: - self.conv_s = SpectralNorm(self.conv_s) - - self.norm_0 = SPADE(spade_param_free_norm, spade_kernel_size, fin, cond_nc) - self.norm_1 = SPADE(spade_param_free_norm, spade_kernel_size, fmiddle, cond_nc) - if self.learned_shortcut: - self.norm_s = SPADE(spade_param_free_norm, spade_kernel_size, fin, cond_nc) - - # note the resnet block with SPADE also takes in |seg|, - # the semantic segmentation map as input - def forward(self, x, seg): - x_s = self.shortcut(x, seg) - - dx = self.conv_0(self.activation(self.norm_0(x, seg))) - dx = self.conv_1(self.activation(self.norm_1(dx, seg))) - - out = x_s + dx - if self.last_activation == "lrelu": - return self.activation(out) - elif self.last_activation is None: - return out - else: - raise NotImplementedError( - "The type of activation is not supported: {}".format( - self.last_activation - ) - ) - - def shortcut(self, x, seg): - if self.learned_shortcut: - x_s = self.conv_s(self.norm_s(x, seg)) - else: - x_s = x - return x_s - - def activation(self, x): - return F.leaky_relu(x, 2e-1) - - def __str__(self): - return strings.spaderesblock(self) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/data/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/data/__init__.py deleted file mode 100644 index d0545627efc9a6f9bb180e351ead519a2cb6dea7..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/data/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .extracted_features_dataset import ExtractedFeaturesDataset -from .random_input_dataset import RandomInputDataset - - -__all__ = [ - "ExtractedFeaturesDataset", - "RandomInputDataset", -] diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/logging/progress_bar.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/logging/progress_bar.py deleted file mode 100644 index 061082caefe542c5f0f87e04d9472583874126a3..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/logging/progress_bar.py +++ /dev/null @@ -1,490 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -""" -Wrapper around various loggers and progress bars (e.g., tqdm). 
-""" - -import atexit -import json -import logging -import os -import sys -from collections import OrderedDict -from contextlib import contextmanager -from numbers import Number -from typing import Optional - -import torch - -from .meters import AverageMeter, StopwatchMeter, TimeMeter - - -logger = logging.getLogger(__name__) - - -def progress_bar( - iterator, - log_format: Optional[str] = None, - log_interval: int = 100, - log_file: Optional[str] = None, - epoch: Optional[int] = None, - prefix: Optional[str] = None, - tensorboard_logdir: Optional[str] = None, - default_log_format: str = "tqdm", - wandb_project: Optional[str] = None, - wandb_run_name: Optional[str] = None, - azureml_logging: Optional[bool] = False, -): - if log_format is None: - log_format = default_log_format - if log_file is not None: - handler = logging.FileHandler(filename=log_file) - logger.addHandler(handler) - - if log_format == "tqdm" and not sys.stderr.isatty(): - log_format = "simple" - - if log_format == "json": - bar = JsonProgressBar(iterator, epoch, prefix, log_interval) - elif log_format == "none": - bar = NoopProgressBar(iterator, epoch, prefix) - elif log_format == "simple": - bar = SimpleProgressBar(iterator, epoch, prefix, log_interval) - elif log_format == "tqdm": - bar = TqdmProgressBar(iterator, epoch, prefix) - else: - raise ValueError("Unknown log format: {}".format(log_format)) - - if tensorboard_logdir: - try: - # [FB only] custom wrapper for TensorBoard - import palaas # noqa - from .fb_tbmf_wrapper import FbTbmfWrapper - - bar = FbTbmfWrapper(bar, log_interval) - except ImportError: - bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir) - - if wandb_project: - bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name) - - if azureml_logging: - bar = AzureMLProgressBarWrapper(bar) - - return bar - - -def build_progress_bar( - args, - iterator, - epoch: Optional[int] = None, - prefix: Optional[str] = None, - default: str = "tqdm", - no_progress_bar: str = "none", -): - """Legacy wrapper that takes an argparse.Namespace.""" - if getattr(args, "no_progress_bar", False): - default = no_progress_bar - if getattr(args, "distributed_rank", 0) == 0: - tensorboard_logdir = getattr(args, "tensorboard_logdir", None) - else: - tensorboard_logdir = None - return progress_bar( - iterator, - log_format=args.log_format, - log_interval=args.log_interval, - epoch=epoch, - prefix=prefix, - tensorboard_logdir=tensorboard_logdir, - default_log_format=default, - ) - - -def format_stat(stat): - if isinstance(stat, Number): - stat = "{:g}".format(stat) - elif isinstance(stat, AverageMeter): - stat = "{:.3f}".format(stat.avg) - elif isinstance(stat, TimeMeter): - stat = "{:g}".format(round(stat.avg)) - elif isinstance(stat, StopwatchMeter): - stat = "{:g}".format(round(stat.sum)) - elif torch.is_tensor(stat): - stat = stat.tolist() - return stat - - -class BaseProgressBar(object): - """Abstract class for progress bars.""" - - def __init__(self, iterable, epoch=None, prefix=None): - self.iterable = iterable - self.n = getattr(iterable, "n", 0) - self.epoch = epoch - self.prefix = "" - if epoch is not None: - self.prefix += "epoch {:03d}".format(epoch) - if prefix is not None: - self.prefix += (" | " if self.prefix != "" else "") + prefix - - def __len__(self): - return len(self.iterable) - - def __enter__(self): - return self - - def __exit__(self, *exc): - return False - - def __iter__(self): - raise NotImplementedError - - def log(self, stats, tag=None, step=None): - """Log intermediate stats 
according to log_interval.""" - raise NotImplementedError - - def print(self, stats, tag=None, step=None): - """Print end-of-epoch stats.""" - raise NotImplementedError - - def update_config(self, config): - """Log latest configuration.""" - pass - - def _str_commas(self, stats): - return ", ".join(key + "=" + stats[key].strip() for key in stats.keys()) - - def _str_pipes(self, stats): - return " | ".join(key + " " + stats[key].strip() for key in stats.keys()) - - def _format_stats(self, stats): - postfix = OrderedDict(stats) - # Preprocess stats according to datatype - for key in postfix.keys(): - postfix[key] = str(format_stat(postfix[key])) - return postfix - - -@contextmanager -def rename_logger(logger, new_name): - old_name = logger.name - if new_name is not None: - logger.name = new_name - yield logger - logger.name = old_name - - -class JsonProgressBar(BaseProgressBar): - """Log output in JSON format.""" - - def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): - super().__init__(iterable, epoch, prefix) - self.log_interval = log_interval - self.i = None - self.size = None - - def __iter__(self): - self.size = len(self.iterable) - for i, obj in enumerate(self.iterable, start=self.n): - self.i = i - yield obj - - def log(self, stats, tag=None, step=None): - """Log intermediate stats according to log_interval.""" - step = step or self.i or 0 - if step > 0 and self.log_interval is not None and step % self.log_interval == 0: - update = ( - self.epoch - 1 + (self.i + 1) / float(self.size) - if self.epoch is not None - else None - ) - stats = self._format_stats(stats, epoch=self.epoch, update=update) - with rename_logger(logger, tag): - logger.info(json.dumps(stats)) - - def print(self, stats, tag=None, step=None): - """Print end-of-epoch stats.""" - self.stats = stats - if tag is not None: - self.stats = OrderedDict( - [(tag + "_" + k, v) for k, v in self.stats.items()] - ) - stats = self._format_stats(self.stats, epoch=self.epoch) - with rename_logger(logger, tag): - logger.info(json.dumps(stats)) - - def _format_stats(self, stats, epoch=None, update=None): - postfix = OrderedDict() - if epoch is not None: - postfix["epoch"] = epoch - if update is not None: - postfix["update"] = round(update, 3) - # Preprocess stats according to datatype - for key in stats.keys(): - postfix[key] = format_stat(stats[key]) - return postfix - - -class NoopProgressBar(BaseProgressBar): - """No logging.""" - - def __init__(self, iterable, epoch=None, prefix=None): - super().__init__(iterable, epoch, prefix) - - def __iter__(self): - for obj in self.iterable: - yield obj - - def log(self, stats, tag=None, step=None): - """Log intermediate stats according to log_interval.""" - pass - - def print(self, stats, tag=None, step=None): - """Print end-of-epoch stats.""" - pass - - -class SimpleProgressBar(BaseProgressBar): - """A minimal logger for non-TTY environments.""" - - def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): - super().__init__(iterable, epoch, prefix) - self.log_interval = log_interval - self.i = None - self.size = None - - def __iter__(self): - self.size = len(self.iterable) - for i, obj in enumerate(self.iterable, start=self.n): - self.i = i - yield obj - - def log(self, stats, tag=None, step=None): - """Log intermediate stats according to log_interval.""" - step = step or self.i or 0 - if step > 0 and self.log_interval is not None and step % self.log_interval == 0: - stats = self._format_stats(stats) - postfix = self._str_commas(stats) - with 
rename_logger(logger, tag): - logger.info( - "{}: {:5d} / {:d} {}".format( - self.prefix, self.i + 1, self.size, postfix - ) - ) - - def print(self, stats, tag=None, step=None): - """Print end-of-epoch stats.""" - postfix = self._str_pipes(self._format_stats(stats)) - with rename_logger(logger, tag): - logger.info("{} | {}".format(self.prefix, postfix)) - - -class TqdmProgressBar(BaseProgressBar): - """Log to tqdm.""" - - def __init__(self, iterable, epoch=None, prefix=None): - super().__init__(iterable, epoch, prefix) - from tqdm import tqdm - - self.tqdm = tqdm( - iterable, - self.prefix, - leave=False, - disable=(logger.getEffectiveLevel() > logging.INFO), - ) - - def __iter__(self): - return iter(self.tqdm) - - def log(self, stats, tag=None, step=None): - """Log intermediate stats according to log_interval.""" - self.tqdm.set_postfix(self._format_stats(stats), refresh=False) - - def print(self, stats, tag=None, step=None): - """Print end-of-epoch stats.""" - postfix = self._str_pipes(self._format_stats(stats)) - with rename_logger(logger, tag): - logger.info("{} | {}".format(self.prefix, postfix)) - - -try: - _tensorboard_writers = {} - from torch.utils.tensorboard import SummaryWriter -except ImportError: - try: - from tensorboardX import SummaryWriter - except ImportError: - SummaryWriter = None - - -def _close_writers(): - for w in _tensorboard_writers.values(): - w.close() - - -atexit.register(_close_writers) - - -class TensorboardProgressBarWrapper(BaseProgressBar): - """Log to tensorboard.""" - - def __init__(self, wrapped_bar, tensorboard_logdir): - self.wrapped_bar = wrapped_bar - self.tensorboard_logdir = tensorboard_logdir - - if SummaryWriter is None: - logger.warning( - "tensorboard not found, please install with: pip install tensorboard" - ) - - def _writer(self, key): - if SummaryWriter is None: - return None - _writers = _tensorboard_writers - if key not in _writers: - _writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key)) - _writers[key].add_text("sys.argv", " ".join(sys.argv)) - return _writers[key] - - def __iter__(self): - return iter(self.wrapped_bar) - - def log(self, stats, tag=None, step=None): - """Log intermediate stats to tensorboard.""" - self._log_to_tensorboard(stats, tag, step) - self.wrapped_bar.log(stats, tag=tag, step=step) - - def print(self, stats, tag=None, step=None): - """Print end-of-epoch stats.""" - self._log_to_tensorboard(stats, tag, step) - self.wrapped_bar.print(stats, tag=tag, step=step) - - def update_config(self, config): - """Log latest configuration.""" - # TODO add hparams to Tensorboard - self.wrapped_bar.update_config(config) - - def _log_to_tensorboard(self, stats, tag=None, step=None): - writer = self._writer(tag or "") - if writer is None: - return - if step is None: - step = stats["num_updates"] - for key in stats.keys() - {"num_updates"}: - if isinstance(stats[key], AverageMeter): - writer.add_scalar(key, stats[key].val, step) - elif isinstance(stats[key], Number): - writer.add_scalar(key, stats[key], step) - elif torch.is_tensor(stats[key]) and stats[key].numel() == 1: - writer.add_scalar(key, stats[key].item(), step) - writer.flush() - - -try: - import wandb -except ImportError: - wandb = None - - -class WandBProgressBarWrapper(BaseProgressBar): - """Log to Weights & Biases.""" - - def __init__(self, wrapped_bar, wandb_project, run_name=None): - self.wrapped_bar = wrapped_bar - if wandb is None: - logger.warning("wandb not found, pip install wandb") - return - - # reinit=False to ensure if wandb.init() is 
called multiple times - # within one process it still references the same run - wandb.init(project=wandb_project, reinit=False, name=run_name) - - def __iter__(self): - return iter(self.wrapped_bar) - - def log(self, stats, tag=None, step=None): - """Log intermediate stats to tensorboard.""" - self._log_to_wandb(stats, tag, step) - self.wrapped_bar.log(stats, tag=tag, step=step) - - def print(self, stats, tag=None, step=None): - """Print end-of-epoch stats.""" - self._log_to_wandb(stats, tag, step) - self.wrapped_bar.print(stats, tag=tag, step=step) - - def update_config(self, config): - """Log latest configuration.""" - if wandb is not None: - wandb.config.update(config) - self.wrapped_bar.update_config(config) - - def _log_to_wandb(self, stats, tag=None, step=None): - if wandb is None: - return - if step is None: - step = stats["num_updates"] - - prefix = "" if tag is None else tag + "/" - - for key in stats.keys() - {"num_updates"}: - if isinstance(stats[key], AverageMeter): - wandb.log({prefix + key: stats[key].val}, step=step) - elif isinstance(stats[key], Number): - wandb.log({prefix + key: stats[key]}, step=step) - - -try: - from azureml.core import Run -except ImportError: - Run = None - - -class AzureMLProgressBarWrapper(BaseProgressBar): - """Log to Azure ML""" - - def __init__(self, wrapped_bar): - self.wrapped_bar = wrapped_bar - if Run is None: - logger.warning("azureml.core not found, pip install azureml-core") - return - self.run = Run.get_context() - - def __exit__(self, *exc): - if Run is not None: - self.run.complete() - return False - - def __iter__(self): - return iter(self.wrapped_bar) - - def log(self, stats, tag=None, step=None): - """Log intermediate stats to AzureML""" - self._log_to_azureml(stats, tag, step) - self.wrapped_bar.log(stats, tag=tag, step=step) - - def print(self, stats, tag=None, step=None): - """Print end-of-epoch stats""" - self._log_to_azureml(stats, tag, step) - self.wrapped_bar.print(stats, tag=tag, step=step) - - def update_config(self, config): - """Log latest configuration.""" - self.wrapped_bar.update_config(config) - - def _log_to_azureml(self, stats, tag=None, step=None): - if Run is None: - return - if step is None: - step = stats["num_updates"] - - prefix = "" if tag is None else tag + "/" - - for key in stats.keys() - {"num_updates"}: - name = prefix + key - if isinstance(stats[key], AverageMeter): - self.run.log_row(name=name, **{"step": step, key: stats[key].val}) - elif isinstance(stats[key], Number): - self.run.log_row(name=name, **{"step": step, key: stats[key]}) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq_cli/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq_cli/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/config/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/config/__init__.py deleted file mode 100644 index 6264236915a7269a4d920ee8213004374dd86a9a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/config/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
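For reference, the `progress_bar.py` module deleted above is a small factory plus wrapper classes around a batch iterator. The following is a minimal, hypothetical usage sketch, assuming the module is importable as `fairseq.logging.progress_bar`; the iterator and the stats values are placeholders, not real training output.

```python
# Hypothetical usage of the progress_bar factory defined in the module above;
# the iterator and the stats values are placeholders, not real training output.
from fairseq.logging import progress_bar

epoch_itr = range(1, 1001)  # stand-in for an epoch batch iterator
bar = progress_bar.progress_bar(
    epoch_itr,
    log_format="json",   # one of "json", "simple", "tqdm", "none"
    log_interval=100,
    epoch=1,
)
for i, batch in enumerate(bar, start=1):
    stats = {"loss": 2.31, "num_updates": i}
    bar.log(stats, tag="train", step=i)   # emitted every log_interval steps
bar.print({"loss": 2.31, "num_updates": 1000}, tag="train")  # end-of-epoch summary
```

Passing `tensorboard_logdir`, `wandb_project` or `azureml_logging` to the factory wraps the bar in the corresponding wrapper class, so the same `log`/`print` calls fan out to those backends as well.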
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/translation_multi_simple_epoch.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/translation_multi_simple_epoch.py deleted file mode 100644 index 6f36e5b93e98497de31969d203ae04dbb4bd9306..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/translation_multi_simple_epoch.py +++ /dev/null @@ -1,430 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import datetime -import logging -import time - -import torch -from fairseq.data import ( - FairseqDataset, - LanguagePairDataset, - ListDataset, - data_utils, - iterators, -) -from fairseq.data.multilingual.multilingual_data_manager import ( - MultilingualDatasetManager, -) -from fairseq.data.multilingual.sampling_method import SamplingMethod -from fairseq.tasks import LegacyFairseqTask, register_task -from fairseq.utils import FileContentsAction - - -### -def get_time_gap(s, e): - return ( - datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s) - ).__str__() - - -### - - -logger = logging.getLogger(__name__) - - -@register_task("translation_multi_simple_epoch") -class TranslationMultiSimpleEpochTask(LegacyFairseqTask): - """ - Translate from one (source) language to another (target) language. - - Args: - langs (List[str]): a list of languages that are being supported - dicts (Dict[str, fairseq.data.Dictionary]): mapping from supported languages to their dictionaries - training (bool): whether the task should be configured for training or not - - .. note:: - - The translation task is compatible with :mod:`fairseq-train`, - :mod:`fairseq-generate` and :mod:`fairseq-interactive`. - - The translation task provides the following additional command-line - arguments: - - .. argparse:: - :ref: fairseq.tasks.translation_parser - :prog: - """ - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - # fmt: off - parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', - help='inference source language') - parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', - help='inference target language') - parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', - help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr', - action=FileContentsAction) - parser.add_argument('--keep-inference-langtok', action='store_true', - help='keep language tokens in inference output (e.g. for analysis or debugging)') - - SamplingMethod.add_arguments(parser) - MultilingualDatasetManager.add_args(parser) - # fmt: on - - def __init__(self, args, langs, dicts, training): - super().__init__(args) - self.langs = langs - self.dicts = dicts - self.training = training - if training: - self.lang_pairs = args.lang_pairs - else: - self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)] - # eval_lang_pairs for multilingual translation is usually all of the - # lang_pairs. However for other multitask settings or when we want to - # optimize for certain languages we want to use a different subset. Thus - # the eval_lang_pairs class variable is provided for classes that extend - # this class. - self.eval_lang_pairs = self.lang_pairs - # model_lang_pairs will be used to build encoder-decoder model pairs in - # models.build_model(). 
This allows multitask type of sub-class can - # build models other than the input lang_pairs - self.model_lang_pairs = self.lang_pairs - self.source_langs = [d.split("-")[0] for d in self.lang_pairs] - self.target_langs = [d.split("-")[1] for d in self.lang_pairs] - self.check_dicts(self.dicts, self.source_langs, self.target_langs) - - self.sampling_method = SamplingMethod.build_sampler(args, self) - self.data_manager = MultilingualDatasetManager.setup_data_manager( - args, self.lang_pairs, langs, dicts, self.sampling_method - ) - - def check_dicts(self, dicts, source_langs, target_langs): - if self.args.source_dict is not None or self.args.target_dict is not None: - # no need to check whether the source side and target side are sharing dictionaries - return - src_dict = dicts[source_langs[0]] - tgt_dict = dicts[target_langs[0]] - for src_lang in source_langs: - assert ( - src_dict == dicts[src_lang] - ), "Diffrent dictionary are specified for different source languages; " - "TranslationMultiSimpleEpochTask only supports one shared dictionary across all source languages" - for tgt_lang in target_langs: - assert ( - tgt_dict == dicts[tgt_lang] - ), "Diffrent dictionary are specified for different target languages; " - "TranslationMultiSimpleEpochTask only supports one shared dictionary across all target languages" - - @classmethod - def setup_task(cls, args, **kwargs): - langs, dicts, training = MultilingualDatasetManager.prepare( - cls.load_dictionary, args, **kwargs - ) - return cls(args, langs, dicts, training) - - def has_sharded_data(self, split): - return self.data_manager.has_sharded_data(split) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split. - - Args: - split (str): name of the split (e.g., train, valid, test) - """ - if split in self.datasets: - dataset = self.datasets[split] - if self.has_sharded_data(split): - if self.args.virtual_epoch_size is not None: - if dataset.load_next_shard: - shard_epoch = dataset.shard_epoch - else: - # no need to load next shard so skip loading - # also this avoid always loading from beginning of the data - return - else: - shard_epoch = epoch - else: - # estimate the shard epoch from virtual data size and virtual epoch size - shard_epoch = self.data_manager.estimate_global_pass_epoch(epoch) - logger.info(f"loading data for {split} epoch={epoch}/{shard_epoch}") - logger.info(f"mem usage: {data_utils.get_mem_usage()}") - if split in self.datasets: - del self.datasets[split] - logger.info("old dataset deleted manually") - logger.info(f"mem usage: {data_utils.get_mem_usage()}") - self.datasets[split] = self.data_manager.load_dataset( - split, - self.training, - epoch=epoch, - combine=combine, - shard_epoch=shard_epoch, - **kwargs, - ) - - def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): - if constraints is not None: - raise NotImplementedError( - "Constrained decoding with the multilingual_translation task is not supported" - ) - - src_data = ListDataset(src_tokens, src_lengths) - dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary) - src_langtok_spec, tgt_langtok_spec = self.args.langtoks["main"] - if self.args.lang_tok_replacing_bos_eos: - dataset = self.data_manager.alter_dataset_langtok( - dataset, - src_eos=self.source_dictionary.eos(), - src_lang=self.args.source_lang, - tgt_eos=self.target_dictionary.eos(), - tgt_lang=self.args.target_lang, - src_langtok_spec=src_langtok_spec, - tgt_langtok_spec=tgt_langtok_spec, - ) - else: - dataset.src 
= self.data_manager.src_dataset_tranform_func( - self.args.source_lang, - self.args.target_lang, - dataset=dataset.src, - spec=src_langtok_spec, - ) - return dataset - - def build_generator( - self, - models, - args, - seq_gen_cls=None, - extra_gen_cls_kwargs=None, - ): - if not getattr(args, "keep_inference_langtok", False): - _, tgt_langtok_spec = self.args.langtoks["main"] - if tgt_langtok_spec: - tgt_lang_tok = self.data_manager.get_decoder_langtok( - self.args.target_lang, tgt_langtok_spec - ) - extra_gen_cls_kwargs = extra_gen_cls_kwargs or {} - extra_gen_cls_kwargs["symbols_to_strip_from_output"] = {tgt_lang_tok} - - return super().build_generator( - models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs - ) - - def build_model(self, args): - return super().build_model(args) - - def valid_step(self, sample, model, criterion): - loss, sample_size, logging_output = super().valid_step(sample, model, criterion) - return loss, sample_size, logging_output - - def inference_step( - self, generator, models, sample, prefix_tokens=None, constraints=None - ): - with torch.no_grad(): - _, tgt_langtok_spec = self.args.langtoks["main"] - if not self.args.lang_tok_replacing_bos_eos: - if prefix_tokens is None and tgt_langtok_spec: - tgt_lang_tok = self.data_manager.get_decoder_langtok( - self.args.target_lang, tgt_langtok_spec - ) - src_tokens = sample["net_input"]["src_tokens"] - bsz = src_tokens.size(0) - prefix_tokens = ( - torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens) - ) - return generator.generate( - models, - sample, - prefix_tokens=prefix_tokens, - constraints=constraints, - ) - else: - return generator.generate( - models, - sample, - prefix_tokens=prefix_tokens, - bos_token=self.data_manager.get_decoder_langtok( - self.args.target_lang, tgt_langtok_spec - ) - if tgt_langtok_spec - else self.target_dictionary.eos(), - ) - - def reduce_metrics(self, logging_outputs, criterion): - super().reduce_metrics(logging_outputs, criterion) - - def max_positions(self): - """Return the max sentence length allowed by the task.""" - return (self.args.max_source_positions, self.args.max_target_positions) - - @property - def source_dictionary(self): - return self.data_manager.get_source_dictionary(self.source_langs[0]) - - @property - def target_dictionary(self): - return self.data_manager.get_target_dictionary(self.target_langs[0]) - - def create_batch_sampler_func( - self, - max_positions, - ignore_invalid_inputs, - max_tokens, - max_sentences, - required_batch_size_multiple=1, - seed=1, - ): - def construct_batch_sampler(dataset, epoch): - splits = [ - s for s, _ in self.datasets.items() if self.datasets[s] == dataset - ] - split = splits[0] if len(splits) > 0 else None - # NEW implementation - if epoch is not None: - # initialize the dataset with the correct starting epoch - dataset.set_epoch(epoch) - - # get indices ordered by example size - start_time = time.time() - logger.info(f"start batch sampler: mem usage: {data_utils.get_mem_usage()}") - - with data_utils.numpy_seed(seed): - indices = dataset.ordered_indices() - logger.info( - f"[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}" - ) - logger.info(f"mem usage: {data_utils.get_mem_usage()}") - - # filter examples that are too large - if max_positions is not None: - my_time = time.time() - indices = self.filter_indices_by_size( - indices, dataset, max_positions, ignore_invalid_inputs - ) - logger.info( - f"[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, 
time.time())}" - ) - logger.info(f"mem usage: {data_utils.get_mem_usage()}") - - # create mini-batches with given size constraints - my_time = time.time() - batch_sampler = dataset.batch_by_size( - indices, - max_tokens=max_tokens, - max_sentences=max_sentences, - required_batch_size_multiple=required_batch_size_multiple, - ) - - logger.info( - f"[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, time.time())}" - ) - logger.info( - f"[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}" - ) - logger.info(f"mem usage: {data_utils.get_mem_usage()}") - - return batch_sampler - - return construct_batch_sampler - - # we need to override get_batch_iterator because we want to reset the epoch iterator each time - def get_batch_iterator( - self, - dataset, - max_tokens=None, - max_sentences=None, - max_positions=None, - ignore_invalid_inputs=False, - required_batch_size_multiple=1, - seed=1, - num_shards=1, - shard_id=0, - num_workers=0, - epoch=1, - data_buffer_size=0, - disable_iterator_cache=False, - ): - """ - Get an iterator that yields batches of data from the given dataset. - - Args: - dataset (~fairseq.data.FairseqDataset): dataset to batch - max_tokens (int, optional): max number of tokens in each batch - (default: None). - max_sentences (int, optional): max number of sentences in each - batch (default: None). - max_positions (optional): max sentence length supported by the - model (default: None). - ignore_invalid_inputs (bool, optional): don't raise Exception for - sentences that are too long (default: False). - required_batch_size_multiple (int, optional): require batch size to - be a multiple of N (default: 1). - seed (int, optional): seed for random number generator for - reproducibility (default: 1). - num_shards (int, optional): shard the data iterator into N - shards (default: 1). - shard_id (int, optional): which shard of the data iterator to - return (default: 0). - num_workers (int, optional): how many subprocesses to use for data - loading. 0 means the data will be loaded in the main process - (default: 0). - epoch (int, optional): the epoch to start the iterator from - (default: 0). - data_buffer_size (int, optional): number of batches to - preload (default: 0). - disable_iterator_cache (bool, optional): don't cache the - EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`) - (default: False). 
- Returns: - ~fairseq.iterators.EpochBatchIterator: a batched iterator over the - given dataset split - """ - # initialize the dataset with the correct starting epoch - assert isinstance(dataset, FairseqDataset) - if dataset in self.dataset_to_epoch_iter: - return self.dataset_to_epoch_iter[dataset] - if self.args.sampling_method == "RoundRobin": - batch_iter = super().get_batch_iterator( - dataset, - max_tokens=max_tokens, - max_sentences=max_sentences, - max_positions=max_positions, - ignore_invalid_inputs=ignore_invalid_inputs, - required_batch_size_multiple=required_batch_size_multiple, - seed=seed, - num_shards=num_shards, - shard_id=shard_id, - num_workers=num_workers, - epoch=epoch, - data_buffer_size=data_buffer_size, - disable_iterator_cache=disable_iterator_cache, - ) - self.dataset_to_epoch_iter[dataset] = batch_iter - return batch_iter - - construct_batch_sampler = self.create_batch_sampler_func( - max_positions, - ignore_invalid_inputs, - max_tokens, - max_sentences, - required_batch_size_multiple=required_batch_size_multiple, - seed=seed, - ) - - epoch_iter = iterators.EpochBatchIterator( - dataset=dataset, - collate_fn=dataset.collater, - batch_sampler=construct_batch_sampler, - seed=seed, - num_shards=num_shards, - shard_id=shard_id, - num_workers=num_workers, - epoch=epoch, - ) - return epoch_iter diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/fast_noisy_channel/README.md b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/fast_noisy_channel/README.md deleted file mode 100644 index f2631a8c34d11bdf7d351c6807b6fe415f5715e1..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/fast_noisy_channel/README.md +++ /dev/null @@ -1,345 +0,0 @@ -# Language Models not just for Pre-training: Fast Online Neural Noisy Channel Modeling - -## Introduction -- [Yee et al. (2019)](https://www.aclweb.org/anthology/D19-1571.pdf) introduce a simple and effective noisy channel modeling approach for neural machine translation. However, the noisy channel online decoding approach introduced in this paper is too slow to be practical. -- To address this, [Bhosale et al. (2020)](http://www.statmt.org/wmt20/pdf/2020.wmt-1.68.pdf) introduces 3 simple approximations to make this approach very fast and practical without much loss in accuracy. -- This README provides intructions on how to run online decoding or generation with the noisy channel modeling approach, including ways to make it very fast without much loss in accuracy. - -## Noisy Channel Modeling - -[Yee et al. (2019)](https://www.aclweb.org/anthology/D19-1571.pdf) applies the Bayes Rule to predict `P(y|x)`, the probability of the target `y` given the source `x`. -```P(y|x) = P(x|y) * P(y) / P(x)``` -- `P(x|y)` predicts the source `x` given the target `y` and is referred to as the **channel model** -- `P(y)` is a **language model** over the target `y` -- `P(x)` is generally not modeled since it is constant for all `y`. - -We use Transformer models to parameterize the direct model `P(y|x)`, the channel model `P(x|y)` and the language model `P(y)`. - -During online decoding with beam search, we generate the top `K2` candidates per beam and score them with the following linear combination of the channel model, the language model as well as the direct model scores. 
- -```(1 / t) * log(P(y|x) + (1 / s) * ( λ1 * log(P(x|y)) + λ2 * log(P(y) ) )``` -- `t` - Target Prefix Length -- `s` - Source Length -- `λ1` - Channel Model Weight -- `λ2` - Language Model Weight - -The top `beam_size` candidates based on the above combined scores are chosen to continue the beams in beam search. In beam search with a direct model alone, the scores from the direct model `P(y|x)` are used to choose the top candidates in beam search. - -This framework provides a great way to utlize strong target language models trained on large amounts of unlabeled data. Language models can prefer targets unrelated to the source, so we also need a channel model whose role is to ensure that the target preferred by the language model also translates back to the source. - -### Training Translation Models and Language Models - -For training Transformer models in fairseq for machine translation, refer to instructions [here](https://github.com/pytorch/fairseq/tree/main/examples/translation) - -For training Transformer models in fairseq for language modeling, refer to instructions [here](https://github.com/pytorch/fairseq/tree/main/examples/language_model) - -### Generation with Language Model for German-English translation with fairseq - -Here are instructions to generate using a direct model and a target-side language model. - -Note: -- Download and install fairseq as per instructions [here](https://github.com/pytorch/fairseq) -- Preprocess and binarize the dataset as per instructions in section [Test Data Preprocessing](#test-data-preprocessing) - -```sh -binarized_data=data_dir/binarized -direct_model=de_en_seed4.pt -lm_model=en_lm.pt -lm_data=lm_data -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt -O ${direct_model} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt -O ${lm_model} -mkdir -p ${lm_data} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/dict.txt -O ${lm_data}/dict.txt - -k2=10 -lenpen=0.16 -lm_wt=0.14 -fairseq-generate ${binarized_data} \ - --user-dir examples/fast_noisy_channel \ - --beam 5 \ - --path ${direct_model} \ - --lm-model ${lm_model} \ - --lm-data ${lm_data} \ - --k2 ${k2} \ - --combine-method lm_only \ - --task noisy_channel_translation \ - --lenpen ${lenpen} \ - --lm-wt ${lm_wt} \ - --gen-subset valid \ - --remove-bpe \ - --fp16 \ - --batch-size 10 -``` -### Noisy Channel Generation for German-English translation with fairseq - -Here are instructions for noisy channel generation with a direct model, channel model and language model as explained in section [Noisy Channel Modeling](#noisy-channel-modeling). 
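Before the concrete commands, here is a small illustrative Python sketch of the combined scoring rule from the [Noisy Channel Modeling](#noisy-channel-modeling) section above. The probabilities, lengths and weights are made-up numbers, and this is not the fairseq implementation; it only restates how one candidate continuation of a beam would be ranked.

```python
# Illustrative re-statement of the combined score
#   (1/t) * log P(y|x) + (1/s) * (lambda1 * log P(x|y) + lambda2 * log P(y))
# with made-up values; fairseq computes this inside its noisy_channel_translation task.
import math

def noisy_channel_score(log_p_direct, log_p_channel, log_p_lm,
                        tgt_prefix_len, src_len, ch_wt, lm_wt):
    """Combined score used to rank the top-k2 candidate continuations of a beam."""
    return (1.0 / tgt_prefix_len) * log_p_direct + (1.0 / src_len) * (
        ch_wt * log_p_channel + lm_wt * log_p_lm
    )

# One hypothetical candidate continuation of a beam:
score = noisy_channel_score(
    log_p_direct=math.log(0.62),   # direct model P(y|x)
    log_p_channel=math.log(0.18),  # channel model P(x|y)
    log_p_lm=math.log(0.40),       # language model P(y)
    tgt_prefix_len=5, src_len=12,
    ch_wt=0.30, lm_wt=0.50,        # lambda1 / lambda2, cf. --ch-wt and --lm-wt below
)
print(f"{score:.4f}")
```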
- -Note: -- Download and install fairseq as per instructions [here](https://github.com/pytorch/fairseq) -- Preprocess and binarize the dataset as per instructions in section [Test Data Preprocessing](#test-data-preprocessing) - -```sh -binarized_data=data_dir/binarized -direct_model=de_en_seed4.pt -lm_model=en_lm.pt -lm_data=lm_data -ch_model=en_de.big.seed4.pt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt -O ${direct_model} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt -O ${lm_model} -mkdir -p ${lm_data} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/dict.txt -O ${lm_data}/dict.txt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed4.pt -O ${ch_model} - -k2=10 -lenpen=0.21 -lm_wt=0.50 -bw_wt=0.30 -fairseq-generate ${binarized_data} \ - --user-dir examples/fast_noisy_channel \ - --beam 5 \ - --path ${direct_model} \ - --lm-model ${lm_model} \ - --lm-data ${lm_data} \ - --channel-model ${ch_model} \ - --k2 ${k2} \ - --combine-method noisy_channel \ - --task noisy_channel_translation \ - --lenpen ${lenpen} \ - --lm-wt ${lm_wt} \ - --ch-wt ${bw_wt} \ - --gen-subset test \ - --remove-bpe \ - --fp16 \ - --batch-size 1 -``` -## Fast Noisy Channel Modeling - -[Bhosale et al. (2020)](http://www.statmt.org/wmt20/pdf/2020.wmt-1.68.pdf) introduces 3 approximations that speed up online noisy channel decoding - -- Smaller channel models (`Tranformer Base` with 1 encoder and decoder layer each vs. `Transformer Big`) - - This involves training a channel model that is possibly smaller and less accurate in terms of BLEU than a channel model of the same size as the direct model. - - Since the role of the channel model is mainly to assign low scores to generations from the language model if they don't translate back to the source, we may not need the most accurate channel model for this purpose. -- Smaller output vocabulary size for the channel model (~30,000 -> ~1000) - - The channel model doesn't need to score the full output vocabulary, it just needs to score the source tokens, which are completely known. - - This is specified using the arguments `--channel-scoring-type src_vocab --top-k-vocab 500` - - This means that the output vocabulary for the channel model will be the source tokens for all examples in the batch and the top-K most frequent tokens in the vocabulary - - This reduces the memory consumption needed to store channel model scores significantly -- Smaller number of candidates (`k2`) scored per beam - - This is specified by reducing the argument `--k2` - - -### Fast Noisy Channel Generation for German-English translation with fairseq - -Here are instructions for **fast** noisy channel generation with a direct model, channel model and language model as explained in section [Fast Noisy Channel Modeling](#fast-noisy-channel-modeling). The main differences are that we use a smaller channel model, reduce `--k2`, set `--channel-scoring-type src_vocab --top-k-vocab 500` and increase the `--batch-size`. 
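A rough PyTorch sketch of the `src_vocab` scoring idea follows; the shapes, token ids and the projection layer are invented for illustration, and fairseq's actual implementation differs.

```python
# Rough illustration (not fairseq's actual code) of restricted channel-model
# scoring: with --channel-scoring-type src_vocab --top-k-vocab 500 the channel
# model only needs scores for the batch's source tokens plus the top-K frequent
# types, so the output projection can be restricted to that small vocabulary.
import torch

d_model, vocab_size, top_k = 512, 32000, 500
decoder_states = torch.randn(1, 7, d_model)                # (batch, src_len, d_model), made up
output_projection = torch.nn.Linear(d_model, vocab_size, bias=False)
src_tokens = torch.tensor([11, 945, 7, 3021, 11, 88, 2])   # made-up source token ids

restricted = torch.unique(torch.cat([src_tokens, torch.arange(top_k)]))
restricted_logits = decoder_states @ output_projection.weight[restricted].T
print(restricted_logits.shape)  # (1, 7, |restricted|) instead of (1, 7, 32000)
```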
- -Note: -- Download and install fairseq as per instructions [here](https://github.com/pytorch/fairseq) -- Preprocess and binarize the dataset as per instructions in section [Test Data Preprocessing](#test-data-preprocessing) - -```sh -binarized_data=data_dir/binarized -direct_model=de_en_seed4.pt -lm_model=en_lm.pt -lm_data=lm_data -small_ch_model=en_de.base_1_1.seed4.pt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt -O ${direct_model} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt -O ${lm_model} -mkdir -p ${lm_data} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/dict.txt -O ${lm_data}/dict.txt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed4.pt -O ${small_ch_model} - -k2=3 -lenpen=0.23 -lm_wt=0.58 -bw_wt=0.26 -fairseq-generate ${binarized_data} \ - --user-dir examples/fast_noisy_channel \ - --beam 5 \ - --path ${direct_model} \ - --lm-model ${lm_model} \ - --lm-data ${lm_data} \ - --channel-model ${small_ch_model} \ - --k2 ${k2} \ - --combine-method noisy_channel \ - --task noisy_channel_translation \ - --lenpen ${lenpen} \ - --lm-wt ${lm_wt} \ - --ch-wt ${bw_wt} \ - --gen-subset test \ - --remove-bpe \ - --fp16 \ - --batch-size 50 \ - --channel-scoring-type src_vocab --top-k-vocab 500 -``` - -## Test Data Preprocessing - -For preprocessing and binarizing the test sets for Romanian-English and German-English translation, we use the following script - - -```sh -FAIRSEQ=/path/to/fairseq -cd $FAIRSEQ -SCRIPTS=$FAIRSEQ/mosesdecoder/scripts -if [ ! -d "${SCRIPTS}" ]; then - echo 'Cloning Moses github repository (for tokenization scripts)...' - git clone https://github.com/moses-smt/mosesdecoder.git -fi -TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl -NORMALIZE=$SCRIPTS/tokenizer/normalize-punctuation.perl - -s=de -t=en -test=wmt18 - -mkdir -p data_dir - -# Tokenization -if [ $s == "ro" ] ; then - # Note: Get normalise-romanian.py and remove-diacritics.py from - # https://github.com/rsennrich/wmt16-scripts/tree/master/preprocess - sacrebleu -t $test -l $s-$t --echo src | \ - $NORMALIZE -l $s | \ - python normalise-romanian.py | \ - python remove-diacritics.py | \ - $TOKENIZER -l $s -a -q > data_dir/$test.$s-$t.$s -else - sacrebleu -t $test -l $s-$t --echo src | perl $NORMALIZE -l $s | perl $TOKENIZER -threads 8 -a -l $s > data_dir/$test.$s-$t.$s -fi - -sacrebleu -t $test -l $s-$t --echo ref | perl $NORMALIZE -l $t | perl $TOKENIZER -threads 8 -a -l $t > data_dir/$test.$s-$t.$t - - -# Applying BPE -src_bpe_code=/path/to/source/language/bpe/code -tgt_bpe_code=/path/to/target/language/bpe/code -src_dict=/path/to/source/language/dict -tgt_dict=/path/to/target/language/dict - -FASTBPE=$FAIRSEQ/fastBPE -if [ ! 
-d "${FASTBPE}" ] ; then - git clone https://github.com/glample/fastBPE.git - # Follow compilation instructions at https://github.com/glample/fastBPE - g++ -std=c++11 -pthread -O3 fastBPE/main.cc -IfastBPE -o fast -fi - -${FASTBPE}/fast applybpe data_dir/bpe.$test.$s-$t.$s data_dir/$test.$s-$t.$s ${src_bpe_code} -${FASTBPE}/fast applybpe data_dir/bpe.$test.$s-$t.$s data_dir/$test.$s-$t.$s ${tgt_bpe_code} - -fairseq-preprocess -s $s -t $t \ - --testpref data_dir/bpe.$test.$s-$t \ - --destdir data_dir/binarized \ - --srcdict ${src_dict} \ - --tgtdict ${tgt_dict} -``` - -## Calculating BLEU - -```sh -DETOKENIZER=$SCRIPTS/tokenizer/detokenizer.perl -cat ${generation_output} | grep -P "^H" | sort -V | cut -f 3- | $DETOKENIZER -l $t -q -a | sacrebleu -t $test -l $s-$t -``` - - -## Romanian-English Translation - -The direct and channel models are trained using bitext data (WMT16) combined with backtranslated data (The monolingual data used for backtranslation comes from http://data.statmt.org/rsennrich/wmt16_backtranslations/ (Sennrich et al., 2016c)) - -The backtranslated data is generated using an ensemble of 3 English-Romanian models trained on bitext training data (WMT16) with unrestricted sampling. - -### BPE Codes and Dictionary - -We learn a joint BPE vocabulary of 18K types on the bitext training data which is used for both the source and target. -||Path| -|----------|------| -| BPE Code | [joint_bpe_18k](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/bpe_18k) | -| Dictionary | [dict](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/dict) | - -### Direct Models -For Ro-En with backtranslation, the direct and channel models use a Transformer-Big architecture. - -| Seed | Model | -|----|----| -| 2 | [ro_en_seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/direct_models/seed2.pt) -| 4 | [ro_en_seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/direct_models/seed4.pt) -| 6 | [ro_en_seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/direct_models/seed6.pt) - -### Channel Models -For channel models, we follow the same steps as for the direct models. But backtranslated data is generated in the opposite direction using [this Romanian monolingual data](http://data.statmt.org/rsennrich/wmt16_backtranslations/). -The best lenpen, LM weight and CH weight are obtained by sweeping over the validation set (wmt16/dev) using beam 5. -| Model Size | Lenpen | LM Weight | CH Weight | Seed 2 | Seed 4 | Seed 6 | -|----|----|----|----|----|----|----| -| `big` | 0.84 | 0.64 | 0.56 | [big.seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/big.seed2.pt) | [big.seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/big.seed2.pt) | [big.seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/big.seed2.pt) | -| `base_1_1` | 0.63 | 0.40 | 0.37 | [base_1_1.seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/base_1_1.seed2.pt) | [base_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/base_1_1.seed4.pt) | [base_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/base_1_1.seed6.pt) | - -### Language Model -The model is trained on de-duplicated English Newscrawl data from 2007-2018 comprising 186 million sentences or 4.5B words after normalization and tokenization. 
-| | Path | -|----|----| -| `--lm-model` | [transformer_en_lm](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/lm_model/transformer_lm.pt) | -| `--lm-data` | [lm_data](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/lm_model/lm_dict) - -## German-English Translation - -### BPE Codes and Dictionaries - -| | Path| -|----------|------| -| Source BPE Code | [de_bpe_code_24K](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/de_bpe_code_24K) | -| Target BPE Code | [en_bpe_code_24K](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/en_bpe_code_24K) -| Source Dictionary | [de_dict](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/de_dict) | -| Target Dictionary | [en_dict](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/en_dict) | - -### Direct Models -We train on WMT’19 training data. Following [Ng et al., 2019](http://statmt.org/wmt19/pdf/53/WMT33.pdf), we apply language identification filtering and remove sentences longer than 250 tokens as well as sentence pairs with a source/target length ratio exceeding 1.5. This results in 26.8M sentence pairs. -We use the Transformer-Big architecture for the direct model. - -| Seed | Model | -|:----:|----| -| 4 | [de_en_seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt) -| 5 | [de_en_seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed5.pt) -| 6 | [de_en_seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed6.pt) - -### Channel Models - -We train on WMT’19 training data. Following [Ng et al., 2019](http://statmt.org/wmt19/pdf/53/WMT33.pdf), we apply language identification filtering and remove sentences longer than 250 tokens as well as sentence pairs with a source/target length ratio exceeding 1.5. This results in 26.8M sentence pairs. 
- -| Model Size | Seed 4 | Seed 5 | Seed 6 | -|----|----|----|----| -| `big` | [big.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed4.pt) | [big.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed5.pt) | [big.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed6.pt) | -| `big_1_1` | [big_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big_1_1.seed4.pt) | [big_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big_1_1.seed5.pt) | [big_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big_1_1.seed6.pt) | -| `base` | [base.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base.seed4.pt) | [base.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base.seed5.pt) | [base.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base.seed6.pt) | -| `base_1_1` | [base_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed4.pt) | [base_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed5.pt) | [base_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed6.pt) | -| `half` | [half.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half.seed4.pt) | [half.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half.seed5.pt) | [half.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half.seed6.pt) | -| `half_1_1` | [half_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half_1_1.seed4.pt) | [half_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half_1_1.seed5.pt) | [half_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half_1_1.seed6.pt) | -| `quarter` | [quarter.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter.seed4.pt) | [quarter.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter.seed5.pt) | [quarter.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter.seed6.pt) | -| `quarter_1_1` | [quarter_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter_1_1.seed4.pt) | [quarter_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter_1_1.seed5.pt) | [quarter_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter_1_1.seed6.pt) | -| `8th` | [8th.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th.seed4.pt) | [8th.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th.seed5.pt) | [8th.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th.seed6.pt) | -| `8th_1_1` | [8th_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th_1_1.seed4.pt) | [8th_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th_1_1.seed5.pt) | [8th_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th_1_1.seed6.pt) | -| `16th` | 
[16th.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th.seed4.pt) | [16th.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th.seed5.pt) | [16th.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th.seed6.pt) | -| `16th_1_1` | [16th_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th_1_1.seed4.pt) | [16th_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th_1_1.seed5.pt) | [16th_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th_1_1.seed6.pt) | - -### Language Model -The model is trained on de-duplicated English Newscrawl data from 2007-2018 comprising 186 million sentences or 4.5B words after normalization and tokenization. -| | Path | -|----|----| -| `--lm-model` | [transformer_en_lm](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt) | -| `--lm-data` | [lm_data](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/) - - -## Citation - -```bibtex -@inproceedings{bhosale2020language, - title={Language Models not just for Pre-training: Fast Online Neural Noisy Channel Modeling}, - author={Shruti Bhosale and Kyra Yee and Sergey Edunov and Michael Auli}, - booktitle={Proceedings of the Fifth Conference on Machine Translation (WMT)}, - year={2020}, -} - -@inproceedings{yee2019simple, - title={Simple and Effective Noisy Channel Modeling for Neural Machine Translation}, - author={Yee, Kyra and Dauphin, Yann and Auli, Michael}, - booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)}, - pages={5700--5705}, - year={2019} -} -``` diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/README.md deleted file mode 100644 index 17030bf0fd50bb843a508e13e97ed436eae33287..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/README.md +++ /dev/null @@ -1,83 +0,0 @@ -### 2021 Update: We are merging this example into the [S2T framework](../speech_to_text), which supports more generic speech-to-text tasks (e.g. speech translation) and more flexible data processing pipelines. Please stay tuned. - -# Speech Recognition -`examples/speech_recognition` is implementing ASR task in Fairseq, along with needed features, datasets, models and loss functions to train and infer model described in [Transformers with convolutional context for ASR (Abdelrahman Mohamed et al., 2019)](https://arxiv.org/abs/1904.11660). - - -## Additional dependencies -On top of main fairseq dependencies there are couple more additional requirements. - -1) Please follow the instructions to install [torchaudio](https://github.com/pytorch/audio). This is required to compute audio fbank features. -2) [Sclite](http://www1.icsi.berkeley.edu/Speech/docs/sctk-1.2/sclite.htm#sclite_name_0) is used to measure WER. Sclite can be downloaded and installed from source from sctk package [here](http://www.openslr.org/4/). Training and inference doesn't require Sclite dependency. -3) [sentencepiece](https://github.com/google/sentencepiece) is required in order to create dataset with word-piece targets. 
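As a quick, hypothetical sanity check for dependency (1), fbank features of the kind this example consumes can be computed with torchaudio; the wav path and `num_mel_bins` below are placeholders rather than settings taken from this example.

```python
# Hypothetical check that torchaudio can compute fbank features; "example.wav"
# and num_mel_bins=80 are placeholders, not values prescribed by this example.
import torchaudio

waveform, sample_rate = torchaudio.load("example.wav")   # (channels, num_samples)
fbank = torchaudio.compliance.kaldi.fbank(
    waveform,
    num_mel_bins=80,
    sample_frequency=sample_rate,
)
print(fbank.shape)  # (num_frames, num_mel_bins)
```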
- -## Preparing librispeech data -``` -./examples/speech_recognition/datasets/prepare-librispeech.sh $DIR_TO_SAVE_RAW_DATA $DIR_FOR_PREPROCESSED_DATA -``` - -## Training librispeech data -``` -python train.py $DIR_FOR_PREPROCESSED_DATA --save-dir $MODEL_PATH --max-epoch 80 --task speech_recognition --arch vggtransformer_2 --optimizer adadelta --lr 1.0 --adadelta-eps 1e-8 --adadelta-rho 0.95 --clip-norm 10.0 --max-tokens 5000 --log-format json --log-interval 1 --criterion cross_entropy_acc --user-dir examples/speech_recognition/ -``` - -## Inference for librispeech -`$SET` can be `test_clean` or `test_other` -Any checkpoint in `$MODEL_PATH` can be selected. In this example we are working with `checkpoint_last.pt` -``` -python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --max-tokens 25000 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --beam 20 --results-path $RES_DIR --batch-size 40 --gen-subset $SET --user-dir examples/speech_recognition/ -``` - -## Inference for librispeech -``` -sclite -r ${RES_DIR}/ref.word-checkpoint_last.pt-${SET}.txt -h ${RES_DIR}/hypo.word-checkpoint_last.pt-${SET}.txt -i rm -o all stdout > $RES_REPORT -``` -`Sum/Avg` row from first table of the report has WER - -## Using flashlight (previously called [wav2letter](https://github.com/facebookresearch/wav2letter)) components -[flashlight](https://github.com/facebookresearch/flashlight) now has integration with fairseq. Currently this includes: - -* AutoSegmentationCriterion (ASG) -* flashlight-style Conv/GLU model -* flashlight's beam search decoder - -To use these, follow the instructions on [this page](https://github.com/facebookresearch/flashlight/tree/master/bindings/python) to install python bindings. - -## Training librispeech data (flashlight style, Conv/GLU + ASG loss) -Training command: -``` -python train.py $DIR_FOR_PREPROCESSED_DATA --save-dir $MODEL_PATH --max-epoch 100 --task speech_recognition --arch w2l_conv_glu_enc --batch-size 4 --optimizer sgd --lr 0.3,0.8 --momentum 0.8 --clip-norm 0.2 --max-tokens 50000 --log-format json --log-interval 100 --num-workers 0 --sentence-avg --criterion asg_loss --asg-transitions-init 5 --max-replabel 2 --linseg-updates 8789 --user-dir examples/speech_recognition -``` - -Note that ASG loss currently doesn't do well with word-pieces. You should prepare a dataset with character targets by setting `nbpe=31` in `prepare-librispeech.sh`. - -## Inference for librispeech (flashlight decoder, n-gram LM) -Inference command: -``` -python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --seed 1 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --gen-subset $SET --results-path $RES_DIR --w2l-decoder kenlm --kenlm-model $KENLM_MODEL_PATH --lexicon $LEXICON_PATH --beam 200 --beam-threshold 15 --lm-weight 1.5 --word-score 1.5 --sil-weight -0.3 --criterion asg_loss --max-replabel 2 --user-dir examples/speech_recognition -``` - -`$KENLM_MODEL_PATH` should be a standard n-gram language model file. `$LEXICON_PATH` should be a flashlight-style lexicon (list of known words and their spellings). 
For ASG inference, a lexicon line should look like this (note the repetition labels): -``` -doorbell D O 1 R B E L 1 ▁ -``` -For CTC inference with word-pieces, repetition labels are not used and the lexicon should have most common spellings for each word (one can use sentencepiece's `NBestEncodeAsPieces` for this): -``` -doorbell ▁DOOR BE LL -doorbell ▁DOOR B E LL -doorbell ▁DO OR BE LL -doorbell ▁DOOR B EL L -doorbell ▁DOOR BE L L -doorbell ▁DO OR B E LL -doorbell ▁DOOR B E L L -doorbell ▁DO OR B EL L -doorbell ▁DO O R BE LL -doorbell ▁DO OR BE L L -``` -Lowercase vs. uppercase matters: the *word* should match the case of the n-gram language model (i.e. `$KENLM_MODEL_PATH`), while the *spelling* should match the case of the token dictionary (i.e. `$DIR_FOR_PREPROCESSED_DATA/dict.txt`). - -## Inference for librispeech (flashlight decoder, viterbi only) -Inference command: -``` -python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --seed 1 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --gen-subset $SET --results-path $RES_DIR --w2l-decoder viterbi --criterion asg_loss --max-replabel 2 --user-dir examples/speech_recognition -``` diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/datasets/prepare-librispeech.sh b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/datasets/prepare-librispeech.sh deleted file mode 100644 index 9e9297f08947027685ff508bfa91ff26b0d8ea0c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/datasets/prepare-librispeech.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -# Prepare librispeech dataset - -base_url=www.openslr.org/resources/12 -train_dir=train_960 - -if [ "$#" -ne 2 ]; then - echo "Usage: $0 " - echo "e.g.: $0 /tmp/librispeech_raw/ ~/data/librispeech_final" - exit 1 -fi - -download_dir=${1%/} -out_dir=${2%/} - -fairseq_root=~/fairseq-py/ -mkdir -p ${out_dir} -cd ${out_dir} || exit - -nbpe=5000 -bpemode=unigram - -if [ ! -d "$fairseq_root" ]; then - echo "$0: Please set correct fairseq_root" - exit 1 -fi - -echo "Data Download" -for part in dev-clean test-clean dev-other test-other train-clean-100 train-clean-360 train-other-500; do - url=$base_url/$part.tar.gz - if ! wget -P $download_dir $url; then - echo "$0: wget failed for $url" - exit 1 - fi - if ! 
tar -C $download_dir -xvzf $download_dir/$part.tar.gz; then - echo "$0: error un-tarring archive $download_dir/$part.tar.gz" - exit 1 - fi -done - -echo "Merge all train packs into one" -mkdir -p ${download_dir}/LibriSpeech/${train_dir}/ -for part in train-clean-100 train-clean-360 train-other-500; do - mv ${download_dir}/LibriSpeech/${part}/* $download_dir/LibriSpeech/${train_dir}/ -done -echo "Merge train text" -find ${download_dir}/LibriSpeech/${train_dir}/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/${train_dir}/text - -# Use combined dev-clean and dev-other as validation set -find ${download_dir}/LibriSpeech/dev-clean/ ${download_dir}/LibriSpeech/dev-other/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/valid_text -find ${download_dir}/LibriSpeech/test-clean/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/test-clean/text -find ${download_dir}/LibriSpeech/test-other/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/test-other/text - - -dict=data/lang_char/${train_dir}_${bpemode}${nbpe}_units.txt -encoded=data/lang_char/${train_dir}_${bpemode}${nbpe}_encoded.txt -fairseq_dict=data/lang_char/${train_dir}_${bpemode}${nbpe}_fairseq_dict.txt -bpemodel=data/lang_char/${train_dir}_${bpemode}${nbpe} -echo "dictionary: ${dict}" -echo "Dictionary preparation" -mkdir -p data/lang_char/ -echo " 3" > ${dict} -echo " 2" >> ${dict} -echo " 1" >> ${dict} -cut -f 2- -d" " ${download_dir}/LibriSpeech/${train_dir}/text > data/lang_char/input.txt -spm_train --input=data/lang_char/input.txt --vocab_size=${nbpe} --model_type=${bpemode} --model_prefix=${bpemodel} --input_sentence_size=100000000 --unk_id=3 --eos_id=2 --pad_id=1 --bos_id=-1 --character_coverage=1 -spm_encode --model=${bpemodel}.model --output_format=piece < data/lang_char/input.txt > ${encoded} -cat ${encoded} | tr ' ' '\n' | sort | uniq | awk '{print $0 " " NR+3}' >> ${dict} -cat ${encoded} | tr ' ' '\n' | sort | uniq -c | awk '{print $2 " " $1}' > ${fairseq_dict} -wc -l ${dict} - -echo "Prepare train and test jsons" -for part in train_960 test-other test-clean; do - python ${fairseq_root}/examples/speech_recognition/datasets/asr_prep_json.py --audio-dirs ${download_dir}/LibriSpeech/${part} --labels ${download_dir}/LibriSpeech/${part}/text --spm-model ${bpemodel}.model --audio-format flac --dictionary ${fairseq_dict} --output ${part}.json -done -# fairseq expects to find train.json and valid.json during training -mv train_960.json train.json - -echo "Prepare valid json" -python ${fairseq_root}/examples/speech_recognition/datasets/asr_prep_json.py --audio-dirs ${download_dir}/LibriSpeech/dev-clean ${download_dir}/LibriSpeech/dev-other --labels ${download_dir}/LibriSpeech/valid_text --spm-model ${bpemodel}.model --audio-format flac --dictionary ${fairseq_dict} --output valid.json - -cp ${fairseq_dict} ./dict.txt -cp ${bpemodel}.model ./spm.model diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OpenDILabCommunity/LLMRiddlesChatGPTEN/llmriddles/llms/mistral.py b/spaces/OpenDILabCommunity/LLMRiddlesChatGPTEN/llmriddles/llms/mistral.py deleted file mode 100644 index b85e2655660685afe7ebec6aed59a68eccd49e41..0000000000000000000000000000000000000000 --- a/spaces/OpenDILabCommunity/LLMRiddlesChatGPTEN/llmriddles/llms/mistral.py +++ /dev/null @@ -1,22 +0,0 
@@ -from functools import lru_cache - -from .base import register_llm -from .llm_client import LLMFlaskClient - - -@lru_cache() -def _get_mistral_7b_instruct_server(host: str, port: int): - from .llm_server import LLMInstance, create_app - core = LLMInstance('Mistral-7B-Instruct-v0.1') - app = create_app(core) - app.run(host=host, port=port) - - -def ask_mistral_7b_instruct(message: str, **kwargs): - host, port = '0.0.0.0', 8001 - _get_mistral_7b_instruct_server(host, port) - client = LLMFlaskClient(host, port) - return client.run(message).strip() - - -register_llm('mistral-7b', ask_mistral_7b_instruct) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/inpainting_src/ldm_inpainting/ldm/modules/image_degradation/__init__.py b/spaces/OpenGVLab/InternGPT/iGPT/models/inpainting_src/ldm_inpainting/ldm/modules/image_degradation/__init__.py deleted file mode 100644 index 7836cada81f90ded99c58d5942eea4c3477f58fc..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/inpainting_src/ldm_inpainting/ldm/modules/image_degradation/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr -from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/archs/tools/embeddings.py b/spaces/OpenMotionLab/MotionGPT/mGPT/archs/tools/embeddings.py deleted file mode 100644 index b53470ce9491addf70d28726571330c834a88e68..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/mGPT/archs/tools/embeddings.py +++ /dev/null @@ -1,322 +0,0 @@ -# This file is taken from signjoey repository -import math - -import torch -from torch import Tensor, nn - - -def get_activation(activation_type): - if activation_type == "relu": - return nn.ReLU() - elif activation_type == "relu6": - return nn.ReLU6() - elif activation_type == "prelu": - return nn.PReLU() - elif activation_type == "selu": - return nn.SELU() - elif activation_type == "celu": - return nn.CELU() - elif activation_type == "gelu": - return nn.GELU() - elif activation_type == "sigmoid": - return nn.Sigmoid() - elif activation_type == "softplus": - return nn.Softplus() - elif activation_type == "softshrink": - return nn.Softshrink() - elif activation_type == "softsign": - return nn.Softsign() - elif activation_type == "tanh": - return nn.Tanh() - elif activation_type == "tanhshrink": - return nn.Tanhshrink() - else: - raise ValueError("Unknown activation type {}".format(activation_type)) - - -class MaskedNorm(nn.Module): - """ - Original Code from: - https://discuss.pytorch.org/t/batchnorm-for-different-sized-samples-in-batch/44251/8 - """ - - def __init__(self, norm_type, num_groups, num_features): - super().__init__() - self.norm_type = norm_type - if self.norm_type == "batch": - self.norm = nn.BatchNorm1d(num_features=num_features) - elif self.norm_type == "group": - self.norm = nn.GroupNorm(num_groups=num_groups, num_channels=num_features) - elif self.norm_type == "layer": - self.norm = nn.LayerNorm(normalized_shape=num_features) - else: - raise ValueError("Unsupported Normalization Layer") - - self.num_features = num_features - - def forward(self, x: Tensor, mask: Tensor): - if self.training: - reshaped = x.reshape([-1, self.num_features]) - reshaped_mask = mask.reshape([-1, 1]) > 0 - selected = torch.masked_select(reshaped, reshaped_mask).reshape( - [-1, self.num_features] - ) - batch_normed = self.norm(selected) - scattered = 
reshaped.masked_scatter(reshaped_mask, batch_normed) - return scattered.reshape([x.shape[0], -1, self.num_features]) - else: - reshaped = x.reshape([-1, self.num_features]) - batched_normed = self.norm(reshaped) - return batched_normed.reshape([x.shape[0], -1, self.num_features]) - - -# TODO (Cihan): Spatial and Word Embeddings are pretty much the same -# We might as well convert them into a single module class. -# Only difference is the lut vs linear layers. -class Embeddings(nn.Module): - - """ - Simple embeddings class - """ - - # pylint: disable=unused-argument - def __init__( - self, - embedding_dim: int = 64, - num_heads: int = 8, - scale: bool = False, - scale_factor: float = None, - norm_type: str = None, - activation_type: str = None, - vocab_size: int = 0, - padding_idx: int = 1, - freeze: bool = False, - **kwargs - ): - """ - Create new embeddings for the vocabulary. - Use scaling for the Transformer. - - :param embedding_dim: - :param scale: - :param vocab_size: - :param padding_idx: - :param freeze: freeze the embeddings during training - """ - super().__init__() - - self.embedding_dim = embedding_dim - self.vocab_size = vocab_size - self.lut = nn.Embedding(vocab_size, self.embedding_dim, padding_idx=padding_idx) - - self.norm_type = norm_type - if self.norm_type: - self.norm = MaskedNorm( - norm_type=norm_type, num_groups=num_heads, num_features=embedding_dim - ) - - self.activation_type = activation_type - if self.activation_type: - self.activation = get_activation(activation_type) - - self.scale = scale - if self.scale: - if scale_factor: - self.scale_factor = scale_factor - else: - self.scale_factor = math.sqrt(self.embedding_dim) - - if freeze: - freeze_params(self) - - # pylint: disable=arguments-differ - def forward(self, x: Tensor, mask: Tensor = None) -> Tensor: - """ - Perform lookup for input `x` in the embedding table. - - :param mask: token masks - :param x: index in the vocabulary - :return: embedded representation for `x` - """ - - x = self.lut(x) - - if self.norm_type: - x = self.norm(x, mask) - - if self.activation_type: - x = self.activation(x) - - if self.scale: - return x * self.scale_factor - else: - return x - - def __repr__(self): - return "%s(embedding_dim=%d, vocab_size=%d)" % ( - self.__class__.__name__, - self.embedding_dim, - self.vocab_size, - ) - - -class SpatialEmbeddings(nn.Module): - - """ - Simple Linear Projection Layer - (For encoder outputs to predict glosses) - """ - - # pylint: disable=unused-argument - def __init__( - self, - embedding_dim: int, - input_size: int, - num_heads: int, - freeze: bool = False, - norm_type: str = "batch", - activation_type: str = "softsign", - scale: bool = False, - scale_factor: float = None, - **kwargs - ): - """ - Create new embeddings for the vocabulary. - Use scaling for the Transformer. 
- - :param embedding_dim: - :param input_size: - :param freeze: freeze the embeddings during training - """ - super().__init__() - - self.embedding_dim = embedding_dim - self.input_size = input_size - self.ln = nn.Linear(self.input_size, self.embedding_dim) - - self.norm_type = norm_type - if self.norm_type: - self.norm = MaskedNorm( - norm_type=norm_type, num_groups=num_heads, num_features=embedding_dim - ) - - self.activation_type = activation_type - if self.activation_type: - self.activation = get_activation(activation_type) - - self.scale = scale - if self.scale: - if scale_factor: - self.scale_factor = scale_factor - else: - self.scale_factor = math.sqrt(self.embedding_dim) - - if freeze: - freeze_params(self) - - # pylint: disable=arguments-differ - def forward(self, x: Tensor, mask: Tensor) -> Tensor: - """ - :param mask: frame masks - :param x: input frame features - :return: embedded representation for `x` - """ - - x = self.ln(x) - - if self.norm_type: - x = self.norm(x, mask) - - if self.activation_type: - x = self.activation(x) - - if self.scale: - return x * self.scale_factor - else: - return x - - def __repr__(self): - return "%s(embedding_dim=%d, input_size=%d)" % ( - self.__class__.__name__, - self.embedding_dim, - self.input_size, - ) - -def get_timestep_embedding( - timesteps: torch.Tensor, - embedding_dim: int, - flip_sin_to_cos: bool = False, - downscale_freq_shift: float = 1, - scale: float = 1, - max_period: int = 10000, -): - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. - - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the - embeddings. :return: an [N x dim] Tensor of positional embeddings. 
- """ - assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" - - half_dim = embedding_dim // 2 - exponent = -math.log(max_period) * torch.arange( - start=0, end=half_dim, dtype=torch.float32, device=timesteps.device - ) - exponent = exponent / (half_dim - downscale_freq_shift) - - emb = torch.exp(exponent) - emb = timesteps[:, None].float() * emb[None, :] - - # scale embeddings - emb = scale * emb - - # concat sine and cosine embeddings - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) - - # flip sine and cosine embeddings - if flip_sin_to_cos: - emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) - - # zero pad - if embedding_dim % 2 == 1: - emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) - return emb - - -class TimestepEmbedding(nn.Module): - def __init__(self, channel: int, time_embed_dim: int, act_fn: str = "silu"): - super().__init__() - - self.linear_1 = nn.Linear(channel, time_embed_dim) - self.act = None - if act_fn == "silu": - self.act = nn.SiLU() - self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim) - - def forward(self, sample): - sample = self.linear_1(sample) - - if self.act is not None: - sample = self.act(sample) - - sample = self.linear_2(sample) - return sample - - -class Timesteps(nn.Module): - def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float): - super().__init__() - self.num_channels = num_channels - self.flip_sin_to_cos = flip_sin_to_cos - self.downscale_freq_shift = downscale_freq_shift - - def forward(self, timesteps): - t_emb = get_timestep_embedding( - timesteps, - self.num_channels, - flip_sin_to_cos=self.flip_sin_to_cos, - downscale_freq_shift=self.downscale_freq_shift, - ) - return t_emb diff --git a/spaces/Owechada/roopfaceswapr/roop/capturer.py b/spaces/Owechada/roopfaceswapr/roop/capturer.py deleted file mode 100644 index fd49d468dd4cd45832ab9612205968207a6f45cf..0000000000000000000000000000000000000000 --- a/spaces/Owechada/roopfaceswapr/roop/capturer.py +++ /dev/null @@ -1,20 +0,0 @@ -from typing import Any -import cv2 - - -def get_video_frame(video_path: str, frame_number: int = 0) -> Any: - capture = cv2.VideoCapture(video_path) - frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT) - capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1)) - has_frame, frame = capture.read() - capture.release() - if has_frame: - return frame - return None - - -def get_video_frame_total(video_path: str) -> int: - capture = cv2.VideoCapture(video_path) - video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) - capture.release() - return video_frame_total diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/exp/upernet_global_small/test.sh b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/exp/upernet_global_small/test.sh deleted file mode 100644 index d9a85e7a0d3b7c96b060f473d41254b37a382fcb..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/exp/upernet_global_small/test.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -work_path=$(dirname $0) -PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ -python -m torch.distributed.launch --nproc_per_node=8 \ - tools/test.py ${work_path}/test_config_h32.py \ - ${work_path}/ckpt/latest.pth \ - --launcher pytorch \ - --eval mIoU \ - 2>&1 | tee -a ${work_path}/log.txt diff --git a/spaces/Paaz/gpt2-lyrics/README.md b/spaces/Paaz/gpt2-lyrics/README.md deleted file mode 100644 index ec8a4d75219fa9a300afaa9c64f218972ca04d70..0000000000000000000000000000000000000000 --- 
a/spaces/Paaz/gpt2-lyrics/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: GPT-2 Lyric Generator -emoji: 🎶 -colorFrom: indigo -colorTo: green -sdk: gradio -sdk_version: 3.0.19 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/base/language.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/base/language.go deleted file mode 100644 index 72470be9c188ef63f31eb941012abc69e19f9be7..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/base/language.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/Llama-2-AWS/app.py b/spaces/PeepDaSlan9/Llama-2-AWS/app.py deleted file mode 100644 index aa6f2d4f0549263e5f729f10dc744293971a362d..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/Llama-2-AWS/app.py +++ /dev/null @@ -1,36 +0,0 @@ -import streamlit as st -import responses - -# Title -st.title("My Conversational Agent") - - -# Initialize chat history -if "messages" not in st.session_state: - st.session_state.messages = [] - - -# Display chat messages from history on app rerun -for message in st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - - -# React to user input -if prompt := st.chat_input("What's in your mind?"): - - # Display user message in chat message container - st.chat_message("user").markdown(prompt) - - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt}) - - # get response - response = responses.get_response(prompt) - - # Display assistant response in chat message container - with st.chat_message("assistant"): - st.markdown(response) - - # Add assistant response to chat history - st.session_state.messages.append({"role": "assistant", "content": response}) \ No newline at end of file diff --git a/spaces/Plachta/VALL-E-X/README.md b/spaces/Plachta/VALL-E-X/README.md deleted file mode 100644 index bd15097d76151ada1e48e5d4fafba35444e2e908..0000000000000000000000000000000000000000 --- a/spaces/Plachta/VALL-E-X/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: VALL E X -emoji: 🎙 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/provider.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/provider.py deleted file mode 100644 index 6300dfc57f051e461776b82591471c7dc7fc486d..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/provider.py +++ /dev/null @@ -1,248 +0,0 @@ -import collections -import math -from typing import ( - TYPE_CHECKING, - Dict, - Iterable, - Iterator, - Mapping, - Sequence, - TypeVar, - Union, -) - -from pip._vendor.resolvelib.providers import AbstractProvider - -from .base import Candidate, Constraint, Requirement -from .candidates import REQUIRES_PYTHON_IDENTIFIER -from .factory import Factory - -if TYPE_CHECKING: - from pip._vendor.resolvelib.providers import Preference - from pip._vendor.resolvelib.resolvers import RequirementInformation - - 
PreferenceInformation = RequirementInformation[Requirement, Candidate] - - _ProviderBase = AbstractProvider[Requirement, Candidate, str] -else: - _ProviderBase = AbstractProvider - -# Notes on the relationship between the provider, the factory, and the -# candidate and requirement classes. -# -# The provider is a direct implementation of the resolvelib class. Its role -# is to deliver the API that resolvelib expects. -# -# Rather than work with completely abstract "requirement" and "candidate" -# concepts as resolvelib does, pip has concrete classes implementing these two -# ideas. The API of Requirement and Candidate objects are defined in the base -# classes, but essentially map fairly directly to the equivalent provider -# methods. In particular, `find_matches` and `is_satisfied_by` are -# requirement methods, and `get_dependencies` is a candidate method. -# -# The factory is the interface to pip's internal mechanisms. It is stateless, -# and is created by the resolver and held as a property of the provider. It is -# responsible for creating Requirement and Candidate objects, and provides -# services to those objects (access to pip's finder and preparer). - - -D = TypeVar("D") -V = TypeVar("V") - - -def _get_with_identifier( - mapping: Mapping[str, V], - identifier: str, - default: D, -) -> Union[D, V]: - """Get item from a package name lookup mapping with a resolver identifier. - - This extra logic is needed when the target mapping is keyed by package - name, which cannot be directly looked up with an identifier (which may - contain requested extras). Additional logic is added to also look up a value - by "cleaning up" the extras from the identifier. - """ - if identifier in mapping: - return mapping[identifier] - # HACK: Theoretically we should check whether this identifier is a valid - # "NAME[EXTRAS]" format, and parse out the name part with packaging or - # some regular expression. But since pip's resolver only spits out three - # kinds of identifiers: normalized PEP 503 names, normalized names plus - # extras, and Requires-Python, we can cheat a bit here. - name, open_bracket, _ = identifier.partition("[") - if open_bracket and name in mapping: - return mapping[name] - return default - - -class PipProvider(_ProviderBase): - """Pip's provider implementation for resolvelib. - - :params constraints: A mapping of constraints specified by the user. Keys - are canonicalized project names. - :params ignore_dependencies: Whether the user specified ``--no-deps``. - :params upgrade_strategy: The user-specified upgrade strategy. - :params user_requested: A set of canonicalized package names that the user - supplied for pip to install/upgrade. 
- """ - - def __init__( - self, - factory: Factory, - constraints: Dict[str, Constraint], - ignore_dependencies: bool, - upgrade_strategy: str, - user_requested: Dict[str, int], - ) -> None: - self._factory = factory - self._constraints = constraints - self._ignore_dependencies = ignore_dependencies - self._upgrade_strategy = upgrade_strategy - self._user_requested = user_requested - self._known_depths: Dict[str, float] = collections.defaultdict(lambda: math.inf) - - def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str: - return requirement_or_candidate.name - - def get_preference( # type: ignore - self, - identifier: str, - resolutions: Mapping[str, Candidate], - candidates: Mapping[str, Iterator[Candidate]], - information: Mapping[str, Iterable["PreferenceInformation"]], - backtrack_causes: Sequence["PreferenceInformation"], - ) -> "Preference": - """Produce a sort key for given requirement based on preference. - - The lower the return value is, the more preferred this group of - arguments is. - - Currently pip considers the following in order: - - * Prefer if any of the known requirements is "direct", e.g. points to an - explicit URL. - * If equal, prefer if any requirement is "pinned", i.e. contains - operator ``===`` or ``==``. - * If equal, calculate an approximate "depth" and resolve requirements - closer to the user-specified requirements first. - * Order user-specified requirements by the order they are specified. - * If equal, prefers "non-free" requirements, i.e. contains at least one - operator, such as ``>=`` or ``<``. - * If equal, order alphabetically for consistency (helps debuggability). - """ - lookups = (r.get_candidate_lookup() for r, _ in information[identifier]) - candidate, ireqs = zip(*lookups) - operators = [ - specifier.operator - for specifier_set in (ireq.specifier for ireq in ireqs if ireq) - for specifier in specifier_set - ] - - direct = candidate is not None - pinned = any(op[:2] == "==" for op in operators) - unfree = bool(operators) - - try: - requested_order: Union[int, float] = self._user_requested[identifier] - except KeyError: - requested_order = math.inf - parent_depths = ( - self._known_depths[parent.name] if parent is not None else 0.0 - for _, parent in information[identifier] - ) - inferred_depth = min(d for d in parent_depths) + 1.0 - else: - inferred_depth = 1.0 - self._known_depths[identifier] = inferred_depth - - requested_order = self._user_requested.get(identifier, math.inf) - - # Requires-Python has only one candidate and the check is basically - # free, so we always do it first to avoid needless work if it fails. - requires_python = identifier == REQUIRES_PYTHON_IDENTIFIER - - # HACK: Setuptools have a very long and solid backward compatibility - # track record, and extremely few projects would request a narrow, - # non-recent version range of it since that would break a lot things. - # (Most projects specify it only to request for an installer feature, - # which does not work, but that's another topic.) Intentionally - # delaying Setuptools helps reduce branches the resolver has to check. - # This serves as a temporary fix for issues like "apache-airflow[all]" - # while we work on "proper" branch pruning techniques. 
- delay_this = identifier == "setuptools" - - # Prefer the causes of backtracking on the assumption that the problem - # resolving the dependency tree is related to the failures that caused - # the backtracking - backtrack_cause = self.is_backtrack_cause(identifier, backtrack_causes) - - return ( - not requires_python, - delay_this, - not direct, - not pinned, - not backtrack_cause, - inferred_depth, - requested_order, - not unfree, - identifier, - ) - - def find_matches( - self, - identifier: str, - requirements: Mapping[str, Iterator[Requirement]], - incompatibilities: Mapping[str, Iterator[Candidate]], - ) -> Iterable[Candidate]: - def _eligible_for_upgrade(identifier: str) -> bool: - """Are upgrades allowed for this project? - - This checks the upgrade strategy, and whether the project was one - that the user specified in the command line, in order to decide - whether we should upgrade if there's a newer version available. - - (Note that we don't need access to the `--upgrade` flag, because - an upgrade strategy of "to-satisfy-only" means that `--upgrade` - was not specified). - """ - if self._upgrade_strategy == "eager": - return True - elif self._upgrade_strategy == "only-if-needed": - user_order = _get_with_identifier( - self._user_requested, - identifier, - default=None, - ) - return user_order is not None - return False - - constraint = _get_with_identifier( - self._constraints, - identifier, - default=Constraint.empty(), - ) - return self._factory.find_candidates( - identifier=identifier, - requirements=requirements, - constraint=constraint, - prefers_installed=(not _eligible_for_upgrade(identifier)), - incompatibilities=incompatibilities, - ) - - def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool: - return requirement.is_satisfied_by(candidate) - - def get_dependencies(self, candidate: Candidate) -> Sequence[Requirement]: - with_requires = not self._ignore_dependencies - return [r for r in candidate.iter_dependencies(with_requires) if r is not None] - - @staticmethod - def is_backtrack_cause( - identifier: str, backtrack_causes: Sequence["PreferenceInformation"] - ) -> bool: - for backtrack_cause in backtrack_causes: - if identifier == backtrack_cause.requirement.name: - return True - if backtrack_cause.parent and identifier == backtrack_cause.parent.name: - return True - return False diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pep517/wrappers.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pep517/wrappers.py deleted file mode 100644 index 987a62aaa99beceaf646ac770c70d19050610e92..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pep517/wrappers.py +++ /dev/null @@ -1,362 +0,0 @@ -import json -import os -import sys -import tempfile -import threading -from contextlib import contextmanager -from os.path import abspath -from os.path import join as pjoin -from subprocess import STDOUT, check_call, check_output - -from .in_process import _in_proc_script_path - -__all__ = [ - 'BackendUnavailable', - 'BackendInvalid', - 'HookMissing', - 'UnsupportedOperation', - 'default_subprocess_runner', - 'quiet_subprocess_runner', - 'Pep517HookCaller', -] - - -def write_json(obj, path, **kwargs): - with open(path, 'w', encoding='utf-8') as f: - json.dump(obj, f, **kwargs) - - -def read_json(path): - with open(path, encoding='utf-8') as f: - return json.load(f) - - -class BackendUnavailable(Exception): - """Will be raised if the 
backend cannot be imported in the hook process.""" - def __init__(self, traceback): - self.traceback = traceback - - -class BackendInvalid(Exception): - """Will be raised if the backend is invalid.""" - def __init__(self, backend_name, backend_path, message): - self.backend_name = backend_name - self.backend_path = backend_path - self.message = message - - -class HookMissing(Exception): - """Will be raised on missing hooks.""" - def __init__(self, hook_name): - super().__init__(hook_name) - self.hook_name = hook_name - - -class UnsupportedOperation(Exception): - """May be raised by build_sdist if the backend indicates that it can't.""" - def __init__(self, traceback): - self.traceback = traceback - - -def default_subprocess_runner(cmd, cwd=None, extra_environ=None): - """The default method of calling the wrapper subprocess.""" - env = os.environ.copy() - if extra_environ: - env.update(extra_environ) - - check_call(cmd, cwd=cwd, env=env) - - -def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None): - """A method of calling the wrapper subprocess while suppressing output.""" - env = os.environ.copy() - if extra_environ: - env.update(extra_environ) - - check_output(cmd, cwd=cwd, env=env, stderr=STDOUT) - - -def norm_and_check(source_tree, requested): - """Normalise and check a backend path. - - Ensure that the requested backend path is specified as a relative path, - and resolves to a location under the given source tree. - - Return an absolute version of the requested path. - """ - if os.path.isabs(requested): - raise ValueError("paths must be relative") - - abs_source = os.path.abspath(source_tree) - abs_requested = os.path.normpath(os.path.join(abs_source, requested)) - # We have to use commonprefix for Python 2.7 compatibility. So we - # normalise case to avoid problems because commonprefix is a character - # based comparison :-( - norm_source = os.path.normcase(abs_source) - norm_requested = os.path.normcase(abs_requested) - if os.path.commonprefix([norm_source, norm_requested]) != norm_source: - raise ValueError("paths must be inside source tree") - - return abs_requested - - -class Pep517HookCaller: - """A wrapper around a source directory to be built with a PEP 517 backend. - - :param source_dir: The path to the source directory, containing - pyproject.toml. - :param build_backend: The build backend spec, as per PEP 517, from - pyproject.toml. - :param backend_path: The backend path, as per PEP 517, from pyproject.toml. - :param runner: A callable that invokes the wrapper subprocess. - :param python_executable: The Python executable used to invoke the backend - - The 'runner', if provided, must expect the following: - - - cmd: a list of strings representing the command and arguments to - execute, as would be passed to e.g. 'subprocess.check_call'. - - cwd: a string representing the working directory that must be - used for the subprocess. Corresponds to the provided source_dir. - - extra_environ: a dict mapping environment variable names to values - which must be set for the subprocess execution. 
- """ - def __init__( - self, - source_dir, - build_backend, - backend_path=None, - runner=None, - python_executable=None, - ): - if runner is None: - runner = default_subprocess_runner - - self.source_dir = abspath(source_dir) - self.build_backend = build_backend - if backend_path: - backend_path = [ - norm_and_check(self.source_dir, p) for p in backend_path - ] - self.backend_path = backend_path - self._subprocess_runner = runner - if not python_executable: - python_executable = sys.executable - self.python_executable = python_executable - - @contextmanager - def subprocess_runner(self, runner): - """A context manager for temporarily overriding the default subprocess - runner. - """ - prev = self._subprocess_runner - self._subprocess_runner = runner - try: - yield - finally: - self._subprocess_runner = prev - - def _supported_features(self): - """Return the list of optional features supported by the backend.""" - return self._call_hook('_supported_features', {}) - - def get_requires_for_build_wheel(self, config_settings=None): - """Identify packages required for building a wheel - - Returns a list of dependency specifications, e.g.:: - - ["wheel >= 0.25", "setuptools"] - - This does not include requirements specified in pyproject.toml. - It returns the result of calling the equivalently named hook in a - subprocess. - """ - return self._call_hook('get_requires_for_build_wheel', { - 'config_settings': config_settings - }) - - def prepare_metadata_for_build_wheel( - self, metadata_directory, config_settings=None, - _allow_fallback=True): - """Prepare a ``*.dist-info`` folder with metadata for this project. - - Returns the name of the newly created folder. - - If the build backend defines a hook with this name, it will be called - in a subprocess. If not, the backend will be asked to build a wheel, - and the dist-info extracted from that (unless _allow_fallback is - False). - """ - return self._call_hook('prepare_metadata_for_build_wheel', { - 'metadata_directory': abspath(metadata_directory), - 'config_settings': config_settings, - '_allow_fallback': _allow_fallback, - }) - - def build_wheel( - self, wheel_directory, config_settings=None, - metadata_directory=None): - """Build a wheel from this project. - - Returns the name of the newly created file. - - In general, this will call the 'build_wheel' hook in the backend. - However, if that was previously called by - 'prepare_metadata_for_build_wheel', and the same metadata_directory is - used, the previously built wheel will be copied to wheel_directory. - """ - if metadata_directory is not None: - metadata_directory = abspath(metadata_directory) - return self._call_hook('build_wheel', { - 'wheel_directory': abspath(wheel_directory), - 'config_settings': config_settings, - 'metadata_directory': metadata_directory, - }) - - def get_requires_for_build_editable(self, config_settings=None): - """Identify packages required for building an editable wheel - - Returns a list of dependency specifications, e.g.:: - - ["wheel >= 0.25", "setuptools"] - - This does not include requirements specified in pyproject.toml. - It returns the result of calling the equivalently named hook in a - subprocess. - """ - return self._call_hook('get_requires_for_build_editable', { - 'config_settings': config_settings - }) - - def prepare_metadata_for_build_editable( - self, metadata_directory, config_settings=None, - _allow_fallback=True): - """Prepare a ``*.dist-info`` folder with metadata for this project. - - Returns the name of the newly created folder. 
- - If the build backend defines a hook with this name, it will be called - in a subprocess. If not, the backend will be asked to build an editable - wheel, and the dist-info extracted from that (unless _allow_fallback is - False). - """ - return self._call_hook('prepare_metadata_for_build_editable', { - 'metadata_directory': abspath(metadata_directory), - 'config_settings': config_settings, - '_allow_fallback': _allow_fallback, - }) - - def build_editable( - self, wheel_directory, config_settings=None, - metadata_directory=None): - """Build an editable wheel from this project. - - Returns the name of the newly created file. - - In general, this will call the 'build_editable' hook in the backend. - However, if that was previously called by - 'prepare_metadata_for_build_editable', and the same metadata_directory - is used, the previously built wheel will be copied to wheel_directory. - """ - if metadata_directory is not None: - metadata_directory = abspath(metadata_directory) - return self._call_hook('build_editable', { - 'wheel_directory': abspath(wheel_directory), - 'config_settings': config_settings, - 'metadata_directory': metadata_directory, - }) - - def get_requires_for_build_sdist(self, config_settings=None): - """Identify packages required for building a wheel - - Returns a list of dependency specifications, e.g.:: - - ["setuptools >= 26"] - - This does not include requirements specified in pyproject.toml. - It returns the result of calling the equivalently named hook in a - subprocess. - """ - return self._call_hook('get_requires_for_build_sdist', { - 'config_settings': config_settings - }) - - def build_sdist(self, sdist_directory, config_settings=None): - """Build an sdist from this project. - - Returns the name of the newly created file. - - This calls the 'build_sdist' backend hook in a subprocess. - """ - return self._call_hook('build_sdist', { - 'sdist_directory': abspath(sdist_directory), - 'config_settings': config_settings, - }) - - def _call_hook(self, hook_name, kwargs): - extra_environ = {'PEP517_BUILD_BACKEND': self.build_backend} - - if self.backend_path: - backend_path = os.pathsep.join(self.backend_path) - extra_environ['PEP517_BACKEND_PATH'] = backend_path - - with tempfile.TemporaryDirectory() as td: - hook_input = {'kwargs': kwargs} - write_json(hook_input, pjoin(td, 'input.json'), indent=2) - - # Run the hook in a subprocess - with _in_proc_script_path() as script: - python = self.python_executable - self._subprocess_runner( - [python, abspath(str(script)), hook_name, td], - cwd=self.source_dir, - extra_environ=extra_environ - ) - - data = read_json(pjoin(td, 'output.json')) - if data.get('unsupported'): - raise UnsupportedOperation(data.get('traceback', '')) - if data.get('no_backend'): - raise BackendUnavailable(data.get('traceback', '')) - if data.get('backend_invalid'): - raise BackendInvalid( - backend_name=self.build_backend, - backend_path=self.backend_path, - message=data.get('backend_error', '') - ) - if data.get('hook_missing'): - raise HookMissing(data.get('missing_hook_name') or hook_name) - return data['return_val'] - - -class LoggerWrapper(threading.Thread): - """ - Read messages from a pipe and redirect them - to a logger (see python's logging module). 
- """ - - def __init__(self, logger, level): - threading.Thread.__init__(self) - self.daemon = True - - self.logger = logger - self.level = level - - # create the pipe and reader - self.fd_read, self.fd_write = os.pipe() - self.reader = os.fdopen(self.fd_read) - - self.start() - - def fileno(self): - return self.fd_write - - @staticmethod - def remove_newline(msg): - return msg[:-1] if msg.endswith(os.linesep) else msg - - def run(self): - for line in self.reader: - self._write(self.remove_newline(line)) - - def _write(self, message): - self.logger.log(self.level, message) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/terminal_theme.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/terminal_theme.py deleted file mode 100644 index 565e9d960f8604c487e063ad9ed3f6f63027f3b4..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/terminal_theme.py +++ /dev/null @@ -1,153 +0,0 @@ -from typing import List, Optional, Tuple - -from .color_triplet import ColorTriplet -from .palette import Palette - -_ColorTuple = Tuple[int, int, int] - - -class TerminalTheme: - """A color theme used when exporting console content. - - Args: - background (Tuple[int, int, int]): The background color. - foreground (Tuple[int, int, int]): The foreground (text) color. - normal (List[Tuple[int, int, int]]): A list of 8 normal intensity colors. - bright (List[Tuple[int, int, int]], optional): A list of 8 bright colors, or None - to repeat normal intensity. Defaults to None. - """ - - def __init__( - self, - background: _ColorTuple, - foreground: _ColorTuple, - normal: List[_ColorTuple], - bright: Optional[List[_ColorTuple]] = None, - ) -> None: - self.background_color = ColorTriplet(*background) - self.foreground_color = ColorTriplet(*foreground) - self.ansi_colors = Palette(normal + (bright or normal)) - - -DEFAULT_TERMINAL_THEME = TerminalTheme( - (255, 255, 255), - (0, 0, 0), - [ - (0, 0, 0), - (128, 0, 0), - (0, 128, 0), - (128, 128, 0), - (0, 0, 128), - (128, 0, 128), - (0, 128, 128), - (192, 192, 192), - ], - [ - (128, 128, 128), - (255, 0, 0), - (0, 255, 0), - (255, 255, 0), - (0, 0, 255), - (255, 0, 255), - (0, 255, 255), - (255, 255, 255), - ], -) - -MONOKAI = TerminalTheme( - (12, 12, 12), - (217, 217, 217), - [ - (26, 26, 26), - (244, 0, 95), - (152, 224, 36), - (253, 151, 31), - (157, 101, 255), - (244, 0, 95), - (88, 209, 235), - (196, 197, 181), - (98, 94, 76), - ], - [ - (244, 0, 95), - (152, 224, 36), - (224, 213, 97), - (157, 101, 255), - (244, 0, 95), - (88, 209, 235), - (246, 246, 239), - ], -) -DIMMED_MONOKAI = TerminalTheme( - (25, 25, 25), - (185, 188, 186), - [ - (58, 61, 67), - (190, 63, 72), - (135, 154, 59), - (197, 166, 53), - (79, 118, 161), - (133, 92, 141), - (87, 143, 164), - (185, 188, 186), - (136, 137, 135), - ], - [ - (251, 0, 31), - (15, 114, 47), - (196, 112, 51), - (24, 109, 227), - (251, 0, 103), - (46, 112, 109), - (253, 255, 185), - ], -) -NIGHT_OWLISH = TerminalTheme( - (255, 255, 255), - (64, 63, 83), - [ - (1, 22, 39), - (211, 66, 62), - (42, 162, 152), - (218, 170, 1), - (72, 118, 214), - (64, 63, 83), - (8, 145, 106), - (122, 129, 129), - (122, 129, 129), - ], - [ - (247, 110, 110), - (73, 208, 197), - (218, 194, 107), - (92, 167, 228), - (105, 112, 152), - (0, 201, 144), - (152, 159, 177), - ], -) - -SVG_EXPORT_THEME = TerminalTheme( - (41, 41, 41), - (197, 200, 198), - [ - (75, 78, 85), - (204, 85, 90), - (152, 168, 75), - (208, 179, 
68), - (96, 138, 177), - (152, 114, 159), - (104, 160, 179), - (197, 200, 198), - (154, 155, 153), - ], - [ - (255, 38, 39), - (0, 130, 61), - (208, 132, 66), - (25, 132, 233), - (255, 44, 122), - (57, 130, 128), - (253, 253, 197), - ], -) diff --git a/spaces/Realcat/image-matching-webui/third_party/ALIKE/README.md b/spaces/Realcat/image-matching-webui/third_party/ALIKE/README.md deleted file mode 100644 index 8f40f15c56f6c54b14bb438e47096737a440fe89..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/ALIKE/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# News - -- The [ALIKED](https://github.com/Shiaoming/ALIKED) is released. -- The [ALIKE training code](https://github.com/Shiaoming/ALIKE/raw/main/assets/ALIKE_code.zip) is released. - -# ALIKE: Accurate and Lightweight Keypoint Detection and Descriptor Extraction - -ALIKE applies a differentiable keypoint detection module to detect accurate sub-pixel keypoints. The network can run at 95 frames per second for 640 x 480 images on NVIDIA Titan X (Pascal) GPU and achieve equivalent performance with the state-of-the-arts. ALIKE benefits real-time applications in resource-limited platforms/devices. Technical details are described in [this paper](https://arxiv.org/pdf/2112.02906.pdf). - -> ``` -> Xiaoming Zhao, Xingming Wu, Jinyu Miao, Weihai Chen, Peter C. Y. Chen, Zhengguo Li, "ALIKE: Accurate and Lightweight Keypoint -> Detection and Descriptor Extraction," IEEE Transactions on Multimedia, 2022. -> ``` - -![](./assets/alike.png) - - -If you use ALIKE in an academic work, please cite: - -``` -@article{Zhao2023ALIKED, - title = {ALIKED: A Lighter Keypoint and Descriptor Extraction Network via Deformable Transformation}, - url = {https://arxiv.org/pdf/2304.03608.pdf}, - doi = {10.1109/TIM.2023.3271000}, - journal = {IEEE Transactions on Instrumentation & Measurement}, - author = {Zhao, Xiaoming and Wu, Xingming and Chen, Weihai and Chen, Peter C. Y. and Xu, Qingsong and Li, Zhengguo}, - year = {2023}, - volume = {72}, - pages = {1-16}, -} - -@article{Zhao2022ALIKE, - title = {ALIKE: Accurate and Lightweight Keypoint Detection and Descriptor Extraction}, - url = {http://arxiv.org/abs/2112.02906}, - doi = {10.1109/TMM.2022.3155927}, - journal = {IEEE Transactions on Multimedia}, - author = {Zhao, Xiaoming and Wu, Xingming and Miao, Jinyu and Chen, Weihai and Chen, Peter C. Y. and Li, Zhengguo}, - month = march, - year = {2022}, -} -``` - - - -## 1. Prerequisites - -The required packages are listed in the `requirements.txt` : - -```shell -pip install -r requirements.txt -``` - - - -## 2. Models - -The off-the-shelf weights of four variant ALIKE models are provided in `models/` . - - - -## 3. Run demo - -```shell -$ python demo.py -h -usage: demo.py [-h] [--model {alike-t,alike-s,alike-n,alike-l}] - [--device DEVICE] [--top_k TOP_K] [--scores_th SCORES_TH] - [--n_limit N_LIMIT] [--no_display] [--no_sub_pixel] - input - -ALike Demo. - -positional arguments: - input Image directory or movie file or "camera0" (for - webcam0). - -optional arguments: - -h, --help show this help message and exit - --model {alike-t,alike-s,alike-n,alike-l} - The model configuration - --device DEVICE Running device (default: cuda). - --top_k TOP_K Detect top K keypoints. -1 for threshold based mode, - >0 for top K mode. (default: -1) - --scores_th SCORES_TH - Detector score threshold (default: 0.2). - --n_limit N_LIMIT Maximum number of keypoints to be detected (default: - 5000). - --no_display Do not display images to screen. 
Useful if running - remotely (default: False). - --no_sub_pixel Do not detect sub-pixel keypoints (default: False). -``` - - - -## 4. Examples - -### KITTI example -```shell -python demo.py assets/kitti -``` -![](./assets/kitti.gif) - -### TUM example -```shell -python demo.py assets/tum -``` -![](./assets/tum.gif) - -## 5. Efficiency and performance - -| Models | Parameters | GFLOPs(640x480) | MHA@3 on Hpatches | mAA(10°) on [IMW2020-test](https://www.cs.ubc.ca/research/image-matching-challenge/2021/leaderboard) (Stereo) | -|:---:|:---:|:---:|:-----------------:|:-------------------------------------------------------------------------------------------------------------:| -| D2-Net(MS) | 7653KB | 889.40 | 38.33% | 12.27% | -| LF-Net(MS) | 2642KB | 24.37 | 57.78% | 23.44% | -| SuperPoint | 1301KB | 26.11 | 70.19% | 28.97% | -| R2D2(MS) | 484KB | 464.55 | 71.48% | 39.02% | -| ASLFeat(MS) | 823KB | 77.58 | 73.52% | 33.65% | -| DISK | 1092KB | 98.97 | 70.56% | 51.22% | -| ALike-N | 318KB | 7.909 | 75.74% | 47.18% | -| ALike-L | 653KB | 19.685 | 76.85% | 49.58% | - -### Evaluation on Hpatches - -- Download [hpatches-sequences-release](https://hpatches.github.io/) and put it into `hseq/hpatches-sequences-release`. -- Remove the unreliable sequences as D2-Net. -- Run the following command to evaluate the performance: - ```shell - python hseq/eval.py - ``` - - -For more details, please refer to the [paper](https://arxiv.org/abs/2112.02906). diff --git a/spaces/Redgon/bingo/src/components/chat-scroll-anchor.tsx b/spaces/Redgon/bingo/src/components/chat-scroll-anchor.tsx deleted file mode 100644 index ac809f4486a48e134cb69314c3d0dae5e68d614e..0000000000000000000000000000000000000000 --- a/spaces/Redgon/bingo/src/components/chat-scroll-anchor.tsx +++ /dev/null @@ -1,29 +0,0 @@ -'use client' - -import * as React from 'react' -import { useInView } from 'react-intersection-observer' - -import { useAtBottom } from '@/lib/hooks/use-at-bottom' - -interface ChatScrollAnchorProps { - trackVisibility?: boolean -} - -export function ChatScrollAnchor({ trackVisibility }: ChatScrollAnchorProps) { - const isAtBottom = useAtBottom() - const { ref, entry, inView } = useInView({ - trackVisibility, - delay: 100, - rootMargin: '0px 0px -150px 0px' - }) - - React.useEffect(() => { - if (isAtBottom && trackVisibility && !inView) { - entry?.target.scrollIntoView({ - block: 'start' - }) - } - }, [inView, entry, isAtBottom, trackVisibility]) - - return
      -} diff --git a/spaces/Reself/StableVideo/annotator/midas/api.py b/spaces/Reself/StableVideo/annotator/midas/api.py deleted file mode 100644 index 1ab9f15bf96bbaffcee0e3e29fc9d3979d6c32e8..0000000000000000000000000000000000000000 --- a/spaces/Reself/StableVideo/annotator/midas/api.py +++ /dev/null @@ -1,169 +0,0 @@ -# based on https://github.com/isl-org/MiDaS - -import cv2 -import os -import torch -import torch.nn as nn -from torchvision.transforms import Compose - -from .midas.dpt_depth import DPTDepthModel -from .midas.midas_net import MidasNet -from .midas.midas_net_custom import MidasNet_small -from .midas.transforms import Resize, NormalizeImage, PrepareForNet -from annotator.util import annotator_ckpts_path - - -ISL_PATHS = { - "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"), - "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"), - "midas_v21": "", - "midas_v21_small": "", -} - -remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt" - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def load_midas_transform(model_type): - # https://github.com/isl-org/MiDaS/blob/master/run.py - # load transform only - if model_type == "dpt_large": # DPT-Large - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "dpt_hybrid": # DPT-Hybrid - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "midas_v21": - net_w, net_h = 384, 384 - resize_mode = "upper_bound" - normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - elif model_type == "midas_v21_small": - net_w, net_h = 256, 256 - resize_mode = "upper_bound" - normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - else: - assert False, f"model_type '{model_type}' not implemented, use: --model_type large" - - transform = Compose( - [ - Resize( - net_w, - net_h, - resize_target=None, - keep_aspect_ratio=True, - ensure_multiple_of=32, - resize_method=resize_mode, - image_interpolation_method=cv2.INTER_CUBIC, - ), - normalization, - PrepareForNet(), - ] - ) - - return transform - - -def load_model(model_type): - # https://github.com/isl-org/MiDaS/blob/master/run.py - # load network - model_path = ISL_PATHS[model_type] - if model_type == "dpt_large": # DPT-Large - model = DPTDepthModel( - path=model_path, - backbone="vitl16_384", - non_negative=True, - ) - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "dpt_hybrid": # DPT-Hybrid - if not os.path.exists(model_path): - from basicsr.utils.download_util import load_file_from_url - load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) - - model = DPTDepthModel( - path=model_path, - backbone="vitb_rn50_384", - non_negative=True, - ) - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "midas_v21": - model = MidasNet(model_path, non_negative=True) - net_w, net_h = 384, 384 - resize_mode = "upper_bound" - normalization = NormalizeImage( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ) - 
- elif model_type == "midas_v21_small": - model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, - non_negative=True, blocks={'expand': True}) - net_w, net_h = 256, 256 - resize_mode = "upper_bound" - normalization = NormalizeImage( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ) - - else: - print(f"model_type '{model_type}' not implemented, use: --model_type large") - assert False - - transform = Compose( - [ - Resize( - net_w, - net_h, - resize_target=None, - keep_aspect_ratio=True, - ensure_multiple_of=32, - resize_method=resize_mode, - image_interpolation_method=cv2.INTER_CUBIC, - ), - normalization, - PrepareForNet(), - ] - ) - - return model.eval(), transform - - -class MiDaSInference(nn.Module): - MODEL_TYPES_TORCH_HUB = [ - "DPT_Large", - "DPT_Hybrid", - "MiDaS_small" - ] - MODEL_TYPES_ISL = [ - "dpt_large", - "dpt_hybrid", - "midas_v21", - "midas_v21_small", - ] - - def __init__(self, model_type): - super().__init__() - assert (model_type in self.MODEL_TYPES_ISL) - model, _ = load_model(model_type) - self.model = model - self.model.train = disabled_train - - def forward(self, x): - with torch.no_grad(): - prediction = self.model(x) - return prediction - diff --git a/spaces/Riksarkivet/htr_demo/helper/text/overview/duplicate_api/api1.md b/spaces/Riksarkivet/htr_demo/helper/text/overview/duplicate_api/api1.md deleted file mode 100644 index 3ddd3e280ca20da62f3dbc5e06019700e5f05506..0000000000000000000000000000000000000000 --- a/spaces/Riksarkivet/htr_demo/helper/text/overview/duplicate_api/api1.md +++ /dev/null @@ -1,11 +0,0 @@ -## Usage of Client API - -If you prefer to run **Fast track** programmatically, we offer an API for that purpose. - -- [REST API](https://riksarkivet-htr-demo.hf.space/?view=api) -- Docuemtnation for gradio client with [python](https://www.gradio.app/guides/getting-started-with-the-python-client) -- Docuemtnation for gradio client with [javascript](https://www.gradio.app/guides/getting-started-with-the-js-client) - -**Note**: More extensive APIs and documentation can be added in the future upon request. 
- -See example below for usage of API in python: diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/__init__.py deleted file mode 100644 index b1fb4062967f7e225a75c697a990153d4f5b5481..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, - get_classes, imagenet_det_classes, - imagenet_vid_classes, voc_classes, get_palette) -from .eval_hooks import DistEvalHook, EvalHook -from .mean_ap import average_precision, eval_map, print_map_summary -from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, - print_recall_summary) - -__all__ = [ - 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', - 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 'get_palette', - 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', - 'print_map_summary', 'eval_recalls', 'print_recall_summary', - 'plot_num_recall', 'plot_iou_recall' -] diff --git a/spaces/Rohith33/facedetector/README.md b/spaces/Rohith33/facedetector/README.md deleted file mode 100644 index 9f54ead94efc1e6f8daea842b397908b200d8734..0000000000000000000000000000000000000000 --- a/spaces/Rohith33/facedetector/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Facedetector -emoji: 🏃 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/torch_utils/ops/__init__.py b/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/torch_utils/ops/__init__.py deleted file mode 100644 index ece0ea08fe2e939cc260a1dafc0ab5b391b773d9..0000000000000000000000000000000000000000 --- a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/torch_utils/ops/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -# empty diff --git a/spaces/RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin/src/readers/base_reader.py b/spaces/RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin/src/readers/base_reader.py deleted file mode 100644 index 7a3781da634a6e1b5ac5227640854c3a0f977317..0000000000000000000000000000000000000000 --- a/spaces/RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin/src/readers/base_reader.py +++ /dev/null @@ -1,9 +0,0 @@ -from typing import Dict, List, Tuple - - -class Reader(): - def read(self, - query: str, - context: Dict[str, List[str]], - num_answers: int) -> List[Tuple]: - raise NotImplementedError() diff --git a/spaces/SIGGRAPH2022/DCT-Net/source/mtcnn_pytorch/src/align_trans.py b/spaces/SIGGRAPH2022/DCT-Net/source/mtcnn_pytorch/src/align_trans.py deleted file mode 100644 index baa3ba731f5a114090baf609ccd481dd23a01ea0..0000000000000000000000000000000000000000 --- a/spaces/SIGGRAPH2022/DCT-Net/source/mtcnn_pytorch/src/align_trans.py +++ /dev/null @@ -1,187 +0,0 @@ -""" -Created on Mon Apr 24 15:43:29 2017 -@author: zhaoy -""" -import cv2 -import numpy as np - -from .matlab_cp2tform import get_similarity_transform_for_cv2 - -# reference facial points, a list of coordinates (x,y) -dx = 1 -dy = 1 -REFERENCE_FACIAL_POINTS = [ - [30.29459953 + dx, 51.69630051 + dy], # left eye - [65.53179932 + dx, 51.50139999 + dy], # right eye - [48.02519989 + dx, 71.73660278 + dy], # nose - [33.54930115 + dx, 92.3655014 + dy], # left mouth - [62.72990036 + dx, 92.20410156 + dy] # right mouth -] - -DEFAULT_CROP_SIZE = (96, 112) - -global FACIAL_POINTS - - -class FaceWarpException(Exception): - - def __str__(self): - return 'In File {}:{}'.format(__file__, super.__str__(self)) - - -def get_reference_facial_points(output_size=None, - inner_padding_factor=0.0, - outer_padding=(0, 0), - default_square=False): - - tmp_5pts = np.array(REFERENCE_FACIAL_POINTS) - tmp_crop_size = np.array(DEFAULT_CROP_SIZE) - - # 0) make the inner region a square - if default_square: - size_diff = max(tmp_crop_size) - tmp_crop_size - tmp_5pts += size_diff / 2 - tmp_crop_size += size_diff - - h_crop = tmp_crop_size[0] - w_crop = tmp_crop_size[1] - if (output_size): - if (output_size[0] == h_crop and output_size[1] == w_crop): - return tmp_5pts - - if (inner_padding_factor == 0 and outer_padding == (0, 0)): - if output_size is None: - return tmp_5pts - else: - raise FaceWarpException( - 'No paddings to do, output_size must be None or {}'.format( - tmp_crop_size)) - - # check output size - if not (0 <= inner_padding_factor <= 1.0): - raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)') - - factor = inner_padding_factor > 0 or outer_padding[0] > 0 - factor = factor or outer_padding[1] > 0 - if (factor and output_size is None): - output_size = tmp_crop_size * \ - (1 + inner_padding_factor * 2).astype(np.int32) - output_size += np.array(outer_padding) - - cond1 = outer_padding[0] < output_size[0] - cond2 = outer_padding[1] < output_size[1] - if not (cond1 and cond2): - raise FaceWarpException('Not (outer_padding[0] < output_size[0]' - 'and outer_padding[1] < output_size[1])') - - # 1) pad the inner region according inner_padding_factor - if inner_padding_factor > 0: - size_diff = tmp_crop_size * inner_padding_factor * 2 - tmp_5pts += size_diff / 2 - tmp_crop_size += np.round(size_diff).astype(np.int32) - - # 2) resize the padded inner region - size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2 - - if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[ - 1] * 
tmp_crop_size[0]: - raise FaceWarpException( - 'Must have (output_size - outer_padding)' - '= some_scale * (crop_size * (1.0 + inner_padding_factor)') - - scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0] - tmp_5pts = tmp_5pts * scale_factor - - # 3) add outer_padding to make output_size - reference_5point = tmp_5pts + np.array(outer_padding) - - return reference_5point - - -def get_affine_transform_matrix(src_pts, dst_pts): - - tfm = np.float32([[1, 0, 0], [0, 1, 0]]) - n_pts = src_pts.shape[0] - ones = np.ones((n_pts, 1), src_pts.dtype) - src_pts_ = np.hstack([src_pts, ones]) - dst_pts_ = np.hstack([dst_pts, ones]) - - A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_) - - if rank == 3: - tfm = np.float32([[A[0, 0], A[1, 0], A[2, 0]], - [A[0, 1], A[1, 1], A[2, 1]]]) - elif rank == 2: - tfm = np.float32([[A[0, 0], A[1, 0], 0], [A[0, 1], A[1, 1], 0]]) - - return tfm - - -def warp_and_crop_face(src_img, - facial_pts, - ratio=0.84, - reference_pts=None, - crop_size=(96, 112), - align_type='similarity' - '', - return_trans_inv=False): - - if reference_pts is None: - if crop_size[0] == 96 and crop_size[1] == 112: - reference_pts = REFERENCE_FACIAL_POINTS - else: - default_square = False - inner_padding_factor = 0 - outer_padding = (0, 0) - output_size = crop_size - - reference_pts = get_reference_facial_points( - output_size, inner_padding_factor, outer_padding, - default_square) - - ref_pts = np.float32(reference_pts) - - factor = ratio - ref_pts = (ref_pts - 112 / 2) * factor + 112 / 2 - ref_pts *= crop_size[0] / 112. - - ref_pts_shp = ref_pts.shape - if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2: - raise FaceWarpException( - 'reference_pts.shape must be (K,2) or (2,K) and K>2') - - if ref_pts_shp[0] == 2: - ref_pts = ref_pts.T - - src_pts = np.float32(facial_pts) - src_pts_shp = src_pts.shape - if max(src_pts_shp) < 3 or min(src_pts_shp) != 2: - raise FaceWarpException( - 'facial_pts.shape must be (K,2) or (2,K) and K>2') - - if src_pts_shp[0] == 2: - src_pts = src_pts.T - - if src_pts.shape != ref_pts.shape: - raise FaceWarpException( - 'facial_pts and reference_pts must have the same shape') - - if align_type == 'cv2_affine': - tfm = cv2.getAffineTransform(src_pts, ref_pts) - tfm_inv = cv2.getAffineTransform(ref_pts, src_pts) - - elif align_type == 'affine': - tfm = get_affine_transform_matrix(src_pts, ref_pts) - tfm_inv = get_affine_transform_matrix(ref_pts, src_pts) - else: - tfm, tfm_inv = get_similarity_transform_for_cv2(src_pts, ref_pts) - - face_img = cv2.warpAffine( - src_img, - tfm, (crop_size[0], crop_size[1]), - borderValue=(255, 255, 255)) - - if return_trans_inv: - return face_img, tfm_inv - else: - return face_img diff --git a/spaces/SRDdev/HingMaskedLM/README.md b/spaces/SRDdev/HingMaskedLM/README.md deleted file mode 100644 index 227c4afa52482983926e7e4138574ef24dbe2609..0000000000000000000000000000000000000000 --- a/spaces/SRDdev/HingMaskedLM/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: HingMaskedLM -emoji: 🌍 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SeViLA/SeViLA/lavis/datasets/datasets/coco_vqa_datasets.py b/spaces/SeViLA/SeViLA/lavis/datasets/datasets/coco_vqa_datasets.py deleted file mode 100644 index b6e07f09a69d0c65bcffe7ae545ceaddb28ab2a0..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/datasets/datasets/coco_vqa_datasets.py 
+++ /dev/null @@ -1,107 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import os -import json - -from PIL import Image - -from lavis.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset - -from collections import OrderedDict - - -class __DisplMixin: - def displ_item(self, index): - sample, ann = self.__getitem__(index), self.annotation[index] - - return OrderedDict( - { - "file": ann["image"], - "question": ann["question"], - "question_id": ann["question_id"], - "answers": "; ".join(ann["answer"]), - "image": sample["image"], - } - ) - - -class COCOVQADataset(VQADataset, __DisplMixin): - def __init__(self, vis_processor, text_processor, vis_root, ann_paths): - super().__init__(vis_processor, text_processor, vis_root, ann_paths) - - def __getitem__(self, index): - ann = self.annotation[index] - - image_path = os.path.join(self.vis_root, ann["image"]) - image = Image.open(image_path).convert("RGB") - - image = self.vis_processor(image) - question = self.text_processor(ann["question"]) - - answer_weight = {} - for answer in ann["answer"]: - if answer in answer_weight.keys(): - answer_weight[answer] += 1 / len(ann["answer"]) - else: - answer_weight[answer] = 1 / len(ann["answer"]) - - answers = list(answer_weight.keys()) - weights = list(answer_weight.values()) - - return { - "image": image, - "text_input": question, - "answers": answers, - "weights": weights, - } - - -class COCOVQAEvalDataset(VQAEvalDataset, __DisplMixin): - def __init__(self, vis_processor, text_processor, vis_root, ann_paths): - """ - vis_root (string): Root directory of images (e.g. coco/images/) - ann_root (string): directory to store the annotation file - """ - - self.vis_root = vis_root - - self.annotation = json.load(open(ann_paths[0])) - - answer_list_path = ann_paths[1] - if os.path.exists(answer_list_path): - self.answer_list = json.load(open(answer_list_path)) - else: - self.answer_list = None - - try: - self.coco_fmt_qust_file = ann_paths[2] - self.coco_fmt_anno_file = ann_paths[3] - except IndexError: - self.coco_fmt_qust_file = None - self.coco_fmt_anno_file = None - - self.vis_processor = vis_processor - self.text_processor = text_processor - - self._add_instance_ids() - - def __getitem__(self, index): - ann = self.annotation[index] - - image_path = os.path.join(self.vis_root, ann["image"]) - image = Image.open(image_path).convert("RGB") - - image = self.vis_processor(image) - question = self.text_processor(ann["question"]) - - return { - "image": image, - "text_input": question, - "question_id": ann["question_id"], - "instance_id": ann["instance_id"], - } diff --git a/spaces/Semii/OpenPoseSkeleton/start.py b/spaces/Semii/OpenPoseSkeleton/start.py deleted file mode 100644 index e5d512289a4581dca4612d6aa2390ace7e534426..0000000000000000000000000000000000000000 --- a/spaces/Semii/OpenPoseSkeleton/start.py +++ /dev/null @@ -1,3 +0,0 @@ -import subprocess - -subprocess.run("uvicorn app:app --host 0.0.0.0 --port 7860", shell=True) diff --git a/spaces/ServerX/PorcoDiaz/diffq/base.py b/spaces/ServerX/PorcoDiaz/diffq/base.py deleted file mode 100644 index 9bd5276b51fbed3d4b898a45b93479ff19e62a7b..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/diffq/base.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. 
-# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass -from concurrent import futures -from fnmatch import fnmatch -from functools import partial -import io -import math -from multiprocessing import cpu_count -import typing as tp -import zlib - -import torch - - -class BaseQuantizer: - @dataclass - class _QuantizedParam: - name: str - param: torch.nn.Parameter - module: torch.nn.Module - # If a Parameter is used multiple times, `other` can be used - # to share state between the different Quantizers - other: tp.Optional[tp.Any] - - def __init__(self, model: torch.nn.Module, min_size: float = 0.01, float16: bool = False, - exclude: tp.Optional[tp.List[str]] = [], detect_bound: bool = True): - self.model = model - self.min_size = min_size - self.float16 = float16 - self.exclude = exclude - self.detect_bound = detect_bound - self._quantized = False - self._pre_handle = self.model.register_forward_pre_hook(self._forward_pre_hook) - self._post_handle = self.model.register_forward_hook(self._forward_hook) - - self._quantized_state = None - self._qparams = [] - self._float16 = [] - self._others = [] - self._rnns = [] - - self._saved = [] - - self._find_params() - - def _find_params(self): - min_params = self.min_size * 2**20 // 4 - previous = {} - for module_name, module in self.model.named_modules(): - if isinstance(module, torch.nn.RNNBase): - self._rnns.append(module) - for name, param in list(module.named_parameters(recurse=False)): - full_name = f"{module_name}.{name}" - matched = False - for pattern in self.exclude: - if fnmatch(full_name, pattern) or fnmatch(name, pattern): - matched = True - break - - if param.numel() <= min_params or matched: - if id(param) in previous: - continue - if self.detect_bound: - previous[id(param)] = None - if self.float16: - self._float16.append(param) - else: - self._others.append(param) - else: - qparam = self._register_param(name, param, module, previous.get(id(param))) - if self.detect_bound: - previous[id(param)] = qparam - self._qparams.append(qparam) - - def _register_param(self, name, param, module, other): - return self.__class__._QuantizedParam(name, param, module, other) - - def _forward_pre_hook(self, module, input): - if self.model.training: - self._quantized_state = None - if self._quantized: - self.unquantize() - if self._pre_forward_train(): - self._fix_rnns() - else: - self.quantize() - - def _forward_hook(self, module, input, output): - if self.model.training: - if self._post_forward_train(): - self._fix_rnns(flatten=False) # Hacky, next forward will flatten - - def quantize(self, save=True): - """ - Immediately apply quantization to the model parameters. - If `save` is True, save a copy of the unquantized parameters, that can be - restored with `unquantize()`. - """ - if self._quantized: - return - if save: - self._saved = [qp.param.data.to('cpu', copy=True) - for qp in self._qparams if qp.other is None] - self.restore_quantized_state(self.get_quantized_state()) - self._quantized = True - self._fix_rnns() - - def unquantize(self): - """ - Revert a previous call to `quantize()`. 
- """ - if not self._quantized: - raise RuntimeError("Can only be called on a quantized model.") - if not self._saved: - raise RuntimeError("Nothing to restore.") - for qparam in self._qparams: - if qparam.other is None: - qparam.param.data[:] = self._saved.pop(0) - assert len(self._saved) == 0 - self._quantized = False - self._fix_rnns() - - def _pre_forward_train(self) -> bool: - """ - Called once before each forward for continuous quantization. - Should return True if parameters were changed. - """ - return False - - def _post_forward_train(self) -> bool: - """ - Called once after each forward (to restore state for instance). - Should return True if parameters were changed. - """ - return False - - def _fix_rnns(self, flatten=True): - """ - To be called after quantization happened to fix RNNs. - """ - for rnn in self._rnns: - rnn._flat_weights = [ - (lambda wn: getattr(rnn, wn) if hasattr(rnn, wn) else None)(wn) - for wn in rnn._flat_weights_names] - if flatten: - rnn.flatten_parameters() - - def get_quantized_state(self): - """ - Returns sufficient quantized information to rebuild the model state. - - ..Note:: - To achieve maximum compression, you should compress this with - gzip or other, as quantized weights are not optimally coded! - """ - if self._quantized_state is None: - self._quantized_state = self._get_quantized_state() - return self._quantized_state - - def _get_quantized_state(self): - """ - Actual implementation for `get_quantized_state`. - """ - float16_params = [] - for p in self._float16: - q = p.data.half() - float16_params.append(q) - - return { - "quantized": [self._quantize_param(qparam) for qparam in self._qparams - if qparam.other is None], - "float16": float16_params, - "others": [p.data.clone() for p in self._others], - } - - def _quantize_param(self, qparam: _QuantizedParam) -> tp.Any: - """ - To be overriden. - """ - raise NotImplementedError() - - def _unquantize_param(self, qparam: _QuantizedParam, quantized: tp.Any) -> torch.Tensor: - """ - To be overriden. - """ - raise NotImplementedError() - - def restore_quantized_state(self, state) -> None: - """ - Restore the state of the model from the quantized state. - """ - for p, q in zip(self._float16, state["float16"]): - p.data[:] = q.to(p) - - for p, q in zip(self._others, state["others"]): - p.data[:] = q - - remaining = list(state["quantized"]) - for qparam in self._qparams: - if qparam.other is not None: - # Only unquantize first appearance of nn.Parameter. - continue - quantized = remaining.pop(0) - qparam.param.data[:] = self._unquantize_param(qparam, quantized) - self._fix_rnns() - - def detach(self) -> None: - """ - Detach from the model, removes hooks and anything else. - """ - self._pre_handle.remove() - self._post_handle.remove() - - def model_size(self) -> torch.Tensor: - """ - Returns an estimate of the quantized model size. - """ - total = torch.tensor(0.) - for p in self._float16: - total += 16 * p.numel() - for p in self._others: - total += 32 * p.numel() - return total / 2**20 / 8 # bits to MegaBytes - - def true_model_size(self) -> float: - """ - Return the true quantized model size, in MB, without extra - compression. - """ - return self.model_size().item() - - def compressed_model_size(self, compress_level=-1, num_workers=8) -> float: - """ - Return the compressed quantized model size, in MB. - - Args: - compress_level (int): compression level used with zlib, - see `zlib.compress` for details. 
- num_workers (int): will split the final big byte representation in that - many chunks processed in parallels. - """ - out = io.BytesIO() - torch.save(self.get_quantized_state(), out) - ms = _parallel_compress_len(out.getvalue(), compress_level, num_workers) - return ms / 2 ** 20 - - -def _compress_len(data, compress_level): - return len(zlib.compress(data, level=compress_level)) - - -def _parallel_compress_len(data, compress_level, num_workers): - num_workers = min(cpu_count(), num_workers) - chunk_size = int(math.ceil(len(data) / num_workers)) - chunks = [data[offset:offset + chunk_size] for offset in range(0, len(data), chunk_size)] - with futures.ProcessPoolExecutor(num_workers) as pool: - return sum(pool.map(partial(_compress_len, compress_level=compress_level), chunks)) diff --git a/spaces/SimianLuo/Latent_Consistency_Model/style.css b/spaces/SimianLuo/Latent_Consistency_Model/style.css deleted file mode 100644 index 0b295a8234b60c0491ae4981196d1b9fc4553e0a..0000000000000000000000000000000000000000 --- a/spaces/SimianLuo/Latent_Consistency_Model/style.css +++ /dev/null @@ -1,16 +0,0 @@ -h1 { - text-align: center; -} - -#duplicate-button { - margin: auto; - color: #fff; - background: #1565c0; - border-radius: 100vh; -} - -#component-0 { - max-width: 830px; - margin: auto; - padding-top: 1.5rem; -} diff --git a/spaces/Stoa/budget_gpt/README.md b/spaces/Stoa/budget_gpt/README.md deleted file mode 100644 index f6feff1b6a7c51a034eacad0d46683c8eac4457c..0000000000000000000000000000000000000000 --- a/spaces/Stoa/budget_gpt/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Budget Gpt -emoji: 📚 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/macro.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/macro.py deleted file mode 100644 index ce86898cac8734f9364353ed3fad9545d0f6043c..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/macro.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Support for interactive macros in IPython""" - -#***************************************************************************** -# Copyright (C) 2001-2005 Fernando Perez -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. -#***************************************************************************** - -import re - -from IPython.utils.encoding import DEFAULT_ENCODING - -coding_declaration = re.compile(r"#\s*coding[:=]\s*([-\w.]+)") - -class Macro(object): - """Simple class to store the value of macros as strings. - - Macro is just a callable that executes a string of IPython - input when called. 
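`model_size` above counts 16 bits per float16 parameter and 32 bits per untouched parameter (`/ 2**20 / 8` converts bits to MiB), while `compressed_model_size` serializes the quantized state and estimates its zlib-compressed size by summing per-chunk compressed lengths. The chunked estimate is fast but slightly pessimistic, since each chunk starts with a fresh compression context. A small single-process sketch of both calculations, with made-up numbers:

```python
import math
import zlib

# Bits-to-MiB accounting as in model_size(), with arbitrary parameter counts.
n_float16, n_float32 = 1_000_000, 250_000
megabytes = (16 * n_float16 + 32 * n_float32) / 2**20 / 8
print(f"uncompressed estimate: {megabytes:.2f} MB")       # ~2.86 MB

# Chunked compressed-length estimate as in _parallel_compress_len(), without the process pool.
data = bytes(range(256)) * 4096                            # ~1 MiB of sample bytes
num_chunks = 8
chunk_size = int(math.ceil(len(data) / num_chunks))
chunks = [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]
chunked = sum(len(zlib.compress(c, 9)) for c in chunks)
whole = len(zlib.compress(data, 9))
print(f"chunked: {chunked} bytes, single stream: {whole} bytes")   # chunked is usually a bit larger
```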
- """ - - def __init__(self,code): - """store the macro value, as a single string which can be executed""" - lines = [] - enc = None - for line in code.splitlines(): - coding_match = coding_declaration.match(line) - if coding_match: - enc = coding_match.group(1) - else: - lines.append(line) - code = "\n".join(lines) - if isinstance(code, bytes): - code = code.decode(enc or DEFAULT_ENCODING) - self.value = code + '\n' - - def __str__(self): - return self.value - - def __repr__(self): - return 'IPython.macro.Macro(%s)' % repr(self.value) - - def __getstate__(self): - """ needed for safe pickling via %store """ - return {'value': self.value} - - def __add__(self, other): - if isinstance(other, Macro): - return Macro(self.value + other.value) - elif isinstance(other, str): - return Macro(self.value + other) - raise TypeError diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/common/json.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/common/json.py deleted file mode 100644 index 6f3e2b21426655000ce33e68b370583653d25d10..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/common/json.py +++ /dev/null @@ -1,292 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See LICENSE in the project root -# for license information. - -"""Improved JSON serialization. -""" - -import builtins -import json -import numbers -import operator - - -JsonDecoder = json.JSONDecoder - - -class JsonEncoder(json.JSONEncoder): - """Customizable JSON encoder. - - If the object implements __getstate__, then that method is invoked, and its - result is serialized instead of the object itself. - """ - - def default(self, value): - try: - get_state = value.__getstate__ - except AttributeError: - pass - else: - return get_state() - return super().default(value) - - -class JsonObject(object): - """A wrapped Python object that formats itself as JSON when asked for a string - representation via str() or format(). - """ - - json_encoder_factory = JsonEncoder - """Used by __format__ when format_spec is not empty.""" - - json_encoder = json_encoder_factory(indent=4) - """The default encoder used by __format__ when format_spec is empty.""" - - def __init__(self, value): - assert not isinstance(value, JsonObject) - self.value = value - - def __getstate__(self): - raise NotImplementedError - - def __repr__(self): - return builtins.repr(self.value) - - def __str__(self): - return format(self) - - def __format__(self, format_spec): - """If format_spec is empty, uses self.json_encoder to serialize self.value - as a string. Otherwise, format_spec is treated as an argument list to be - passed to self.json_encoder_factory - which defaults to JSONEncoder - and - then the resulting formatter is used to serialize self.value as a string. - - Example:: - - format("{0} {0:indent=4,sort_keys=True}", json.repr(x)) - """ - if format_spec: - # At this point, format_spec is a string that looks something like - # "indent=4,sort_keys=True". What we want is to build a function call - # from that which looks like: - # - # json_encoder_factory(indent=4,sort_keys=True) - # - # which we can then eval() to create our encoder instance. 
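The `__format__` trick above turns a format spec such as `indent=2,sort_keys=True` into keyword arguments for the encoder factory, and `JsonEncoder.default` falls back to an object's `__getstate__`. A hedged usage sketch, assuming an installed `debugpy` whose `debugpy.common.json` matches the vendored copy shown here; the validators used at the end are defined a little further down in the same module.

```python
from debugpy.common import json as djson  # assumption: installed debugpy exposes this module

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __getstate__(self):
        # JsonEncoder.default() serializes this dict instead of failing on the object.
        return {"x": self.x, "y": self.y}

p = djson.repr(Point(1, 2))                          # wraps the object in a JsonObject
print("{0}".format(p))                               # default encoder (indent=4)
print("{0:indent=2,sort_keys=True}".format(p))       # spec becomes JsonEncoder(indent=2, sort_keys=True)

# The property validators defined below: () stands for "property missing in JSON".
print(djson.of_type(int, optional=True)(()))         # ()
print(djson.enum("debug", "info", optional=True)(())) # 'debug' -- first value is the default
print(djson.array(str, vectorize=True)("src"))       # ['src'] -- scalar promoted to a list
```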
- make_encoder = "json_encoder_factory(" + format_spec + ")" - encoder = eval( - make_encoder, {"json_encoder_factory": self.json_encoder_factory} - ) - else: - encoder = self.json_encoder - return encoder.encode(self.value) - - -# JSON property validators, for use with MessageDict. -# -# A validator is invoked with the actual value of the JSON property passed to it as -# the sole argument; or if the property is missing in JSON, then () is passed. Note -# that None represents an actual null in JSON, while () is a missing value. -# -# The validator must either raise TypeError or ValueError describing why the property -# value is invalid, or else return the value of the property, possibly after performing -# some substitutions - e.g. replacing () with some default value. - - -def _converter(value, classinfo): - """Convert value (str) to number, otherwise return None if is not possible""" - for one_info in classinfo: - if issubclass(one_info, numbers.Number): - try: - return one_info(value) - except ValueError: - pass - - -def of_type(*classinfo, **kwargs): - """Returns a validator for a JSON property that requires it to have a value of - the specified type. If optional=True, () is also allowed. - - The meaning of classinfo is the same as for isinstance(). - """ - - assert len(classinfo) - optional = kwargs.pop("optional", False) - assert not len(kwargs) - - def validate(value): - if (optional and value == ()) or isinstance(value, classinfo): - return value - else: - converted_value = _converter(value, classinfo) - if converted_value: - return converted_value - - if not optional and value == (): - raise ValueError("must be specified") - raise TypeError("must be " + " or ".join(t.__name__ for t in classinfo)) - - return validate - - -def default(default): - """Returns a validator for a JSON property with a default value. - - The validator will only allow property values that have the same type as the - specified default value. - """ - - def validate(value): - if value == (): - return default - elif isinstance(value, type(default)): - return value - else: - raise TypeError("must be {0}".format(type(default).__name__)) - - return validate - - -def enum(*values, **kwargs): - """Returns a validator for a JSON enum. - - The validator will only allow the property to have one of the specified values. - - If optional=True, and the property is missing, the first value specified is used - as the default. - """ - - assert len(values) - optional = kwargs.pop("optional", False) - assert not len(kwargs) - - def validate(value): - if optional and value == (): - return values[0] - elif value in values: - return value - else: - raise ValueError("must be one of: {0!r}".format(list(values))) - - return validate - - -def array(validate_item=False, vectorize=False, size=None): - """Returns a validator for a JSON array. - - If the property is missing, it is treated as if it were []. Otherwise, it must - be a list. - - If validate_item=False, it's treated as if it were (lambda x: x) - i.e. any item - is considered valid, and is unchanged. If validate_item is a type or a tuple, - it's treated as if it were json.of_type(validate). - - Every item in the list is replaced with validate_item(item) in-place, propagating - any exceptions raised by the latter. If validate_item is a type or a tuple, it is - treated as if it were json.of_type(validate_item). - - If vectorize=True, and the value is neither a list nor a dict, it is treated as - if it were a single-element list containing that single value - e.g. 
"foo" is - then the same as ["foo"]; but {} is an error, and not [{}]. - - If size is not None, it can be an int, a tuple of one int, a tuple of two ints, - or a set. If it's an int, the array must have exactly that many elements. If it's - a tuple of one int, it's the minimum length. If it's a tuple of two ints, they - are the minimum and the maximum lengths. If it's a set, it's the set of sizes that - are valid - e.g. for {2, 4}, the array can be either 2 or 4 elements long. - """ - - if not validate_item: - validate_item = lambda x: x - elif isinstance(validate_item, type) or isinstance(validate_item, tuple): - validate_item = of_type(validate_item) - - if size is None: - validate_size = lambda _: True - elif isinstance(size, set): - size = {operator.index(n) for n in size} - validate_size = lambda value: ( - len(value) in size - or "must have {0} elements".format( - " or ".join(str(n) for n in sorted(size)) - ) - ) - elif isinstance(size, tuple): - assert 1 <= len(size) <= 2 - size = tuple(operator.index(n) for n in size) - min_len, max_len = (size + (None,))[0:2] - validate_size = lambda value: ( - "must have at least {0} elements".format(min_len) - if len(value) < min_len - else "must have at most {0} elements".format(max_len) - if max_len is not None and len(value) < max_len - else True - ) - else: - size = operator.index(size) - validate_size = lambda value: ( - len(value) == size or "must have {0} elements".format(size) - ) - - def validate(value): - if value == (): - value = [] - elif vectorize and not isinstance(value, (list, dict)): - value = [value] - - of_type(list)(value) - - size_err = validate_size(value) # True if valid, str if error - if size_err is not True: - raise ValueError(size_err) - - for i, item in enumerate(value): - try: - value[i] = validate_item(item) - except (TypeError, ValueError) as exc: - raise type(exc)(f"[{repr(i)}] {exc}") - return value - - return validate - - -def object(validate_value=False): - """Returns a validator for a JSON object. - - If the property is missing, it is treated as if it were {}. Otherwise, it must - be a dict. - - If validate_value=False, it's treated as if it were (lambda x: x) - i.e. any - value is considered valid, and is unchanged. If validate_value is a type or a - tuple, it's treated as if it were json.of_type(validate_value). - - Every value in the dict is replaced with validate_value(value) in-place, propagating - any exceptions raised by the latter. If validate_value is a type or a tuple, it is - treated as if it were json.of_type(validate_value). Keys are not affected. - """ - - if isinstance(validate_value, type) or isinstance(validate_value, tuple): - validate_value = of_type(validate_value) - - def validate(value): - if value == (): - return {} - - of_type(dict)(value) - if validate_value: - for k, v in value.items(): - try: - value[k] = validate_value(v) - except (TypeError, ValueError) as exc: - raise type(exc)(f"[{repr(k)}] {exc}") - return value - - return validate - - -def repr(value): - return JsonObject(value) - - -dumps = json.dumps -loads = json.loads diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/bricks/transformer.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/bricks/transformer.py deleted file mode 100644 index e61ae0dd941a7be00b3e41a3de833ec50470a45f..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/bricks/transformer.py +++ /dev/null @@ -1,595 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import copy -import warnings - -import torch -import torch.nn as nn - -from annotator.uniformer.mmcv import ConfigDict, deprecated_api_warning -from annotator.uniformer.mmcv.cnn import Linear, build_activation_layer, build_norm_layer -from annotator.uniformer.mmcv.runner.base_module import BaseModule, ModuleList, Sequential -from annotator.uniformer.mmcv.utils import build_from_cfg -from .drop import build_dropout -from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING, - TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE) - -# Avoid BC-breaking of importing MultiScaleDeformableAttention from this file -try: - from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention # noqa F401 - warnings.warn( - ImportWarning( - '``MultiScaleDeformableAttention`` has been moved to ' - '``mmcv.ops.multi_scale_deform_attn``, please change original path ' # noqa E501 - '``from annotator.uniformer.mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention`` ' # noqa E501 - 'to ``from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention`` ' # noqa E501 - )) - -except ImportError: - warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from ' - '``mmcv.ops.multi_scale_deform_attn``, ' - 'You should install ``mmcv-full`` if you need this module. ') - - -def build_positional_encoding(cfg, default_args=None): - """Builder for Position Encoding.""" - return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args) - - -def build_attention(cfg, default_args=None): - """Builder for attention.""" - return build_from_cfg(cfg, ATTENTION, default_args) - - -def build_feedforward_network(cfg, default_args=None): - """Builder for feed-forward network (FFN).""" - return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args) - - -def build_transformer_layer(cfg, default_args=None): - """Builder for transformer layer.""" - return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args) - - -def build_transformer_layer_sequence(cfg, default_args=None): - """Builder for transformer encoder and transformer decoder.""" - return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args) - - -@ATTENTION.register_module() -class MultiheadAttention(BaseModule): - """A wrapper for ``torch.nn.MultiheadAttention``. - - This module implements MultiheadAttention with identity connection, - and positional encoding is also passed as input. - - Args: - embed_dims (int): The embedding dimension. - num_heads (int): Parallel attention heads. - attn_drop (float): A Dropout layer on attn_output_weights. - Default: 0.0. - proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. - Default: 0.0. - dropout_layer (obj:`ConfigDict`): The dropout_layer used - when adding the shortcut. - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - batch_first (bool): When it is True, Key, Query and Value are shape of - (batch, n, embed_dim), otherwise (n, batch, embed_dim). - Default to False. 
- """ - - def __init__(self, - embed_dims, - num_heads, - attn_drop=0., - proj_drop=0., - dropout_layer=dict(type='Dropout', drop_prob=0.), - init_cfg=None, - batch_first=False, - **kwargs): - super(MultiheadAttention, self).__init__(init_cfg) - if 'dropout' in kwargs: - warnings.warn('The arguments `dropout` in MultiheadAttention ' - 'has been deprecated, now you can separately ' - 'set `attn_drop`(float), proj_drop(float), ' - 'and `dropout_layer`(dict) ') - attn_drop = kwargs['dropout'] - dropout_layer['drop_prob'] = kwargs.pop('dropout') - - self.embed_dims = embed_dims - self.num_heads = num_heads - self.batch_first = batch_first - - self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop, - **kwargs) - - self.proj_drop = nn.Dropout(proj_drop) - self.dropout_layer = build_dropout( - dropout_layer) if dropout_layer else nn.Identity() - - @deprecated_api_warning({'residual': 'identity'}, - cls_name='MultiheadAttention') - def forward(self, - query, - key=None, - value=None, - identity=None, - query_pos=None, - key_pos=None, - attn_mask=None, - key_padding_mask=None, - **kwargs): - """Forward function for `MultiheadAttention`. - - **kwargs allow passing a more general data flow when combining - with other operations in `transformerlayer`. - - Args: - query (Tensor): The input query with shape [num_queries, bs, - embed_dims] if self.batch_first is False, else - [bs, num_queries embed_dims]. - key (Tensor): The key tensor with shape [num_keys, bs, - embed_dims] if self.batch_first is False, else - [bs, num_keys, embed_dims] . - If None, the ``query`` will be used. Defaults to None. - value (Tensor): The value tensor with same shape as `key`. - Same in `nn.MultiheadAttention.forward`. Defaults to None. - If None, the `key` will be used. - identity (Tensor): This tensor, with the same shape as x, - will be used for the identity link. - If None, `x` will be used. Defaults to None. - query_pos (Tensor): The positional encoding for query, with - the same shape as `x`. If not None, it will - be added to `x` before forward function. Defaults to None. - key_pos (Tensor): The positional encoding for `key`, with the - same shape as `key`. Defaults to None. If not None, it will - be added to `key` before forward function. If None, and - `query_pos` has the same shape as `key`, then `query_pos` - will be used for `key_pos`. Defaults to None. - attn_mask (Tensor): ByteTensor mask with shape [num_queries, - num_keys]. Same in `nn.MultiheadAttention.forward`. - Defaults to None. - key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. - Defaults to None. - - Returns: - Tensor: forwarded results with shape - [num_queries, bs, embed_dims] - if self.batch_first is False, else - [bs, num_queries embed_dims]. 
- """ - - if key is None: - key = query - if value is None: - value = key - if identity is None: - identity = query - if key_pos is None: - if query_pos is not None: - # use query_pos if key_pos is not available - if query_pos.shape == key.shape: - key_pos = query_pos - else: - warnings.warn(f'position encoding of key is' - f'missing in {self.__class__.__name__}.') - if query_pos is not None: - query = query + query_pos - if key_pos is not None: - key = key + key_pos - - # Because the dataflow('key', 'query', 'value') of - # ``torch.nn.MultiheadAttention`` is (num_query, batch, - # embed_dims), We should adjust the shape of dataflow from - # batch_first (batch, num_query, embed_dims) to num_query_first - # (num_query ,batch, embed_dims), and recover ``attn_output`` - # from num_query_first to batch_first. - if self.batch_first: - query = query.transpose(0, 1) - key = key.transpose(0, 1) - value = value.transpose(0, 1) - - out = self.attn( - query=query, - key=key, - value=value, - attn_mask=attn_mask, - key_padding_mask=key_padding_mask)[0] - - if self.batch_first: - out = out.transpose(0, 1) - - return identity + self.dropout_layer(self.proj_drop(out)) - - -@FEEDFORWARD_NETWORK.register_module() -class FFN(BaseModule): - """Implements feed-forward networks (FFNs) with identity connection. - - Args: - embed_dims (int): The feature dimension. Same as - `MultiheadAttention`. Defaults: 256. - feedforward_channels (int): The hidden dimension of FFNs. - Defaults: 1024. - num_fcs (int, optional): The number of fully-connected layers in - FFNs. Default: 2. - act_cfg (dict, optional): The activation config for FFNs. - Default: dict(type='ReLU') - ffn_drop (float, optional): Probability of an element to be - zeroed in FFN. Default 0.0. - add_identity (bool, optional): Whether to add the - identity connection. Default: `True`. - dropout_layer (obj:`ConfigDict`): The dropout_layer used - when adding the shortcut. - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - """ - - @deprecated_api_warning( - { - 'dropout': 'ffn_drop', - 'add_residual': 'add_identity' - }, - cls_name='FFN') - def __init__(self, - embed_dims=256, - feedforward_channels=1024, - num_fcs=2, - act_cfg=dict(type='ReLU', inplace=True), - ffn_drop=0., - dropout_layer=None, - add_identity=True, - init_cfg=None, - **kwargs): - super(FFN, self).__init__(init_cfg) - assert num_fcs >= 2, 'num_fcs should be no less ' \ - f'than 2. got {num_fcs}.' - self.embed_dims = embed_dims - self.feedforward_channels = feedforward_channels - self.num_fcs = num_fcs - self.act_cfg = act_cfg - self.activate = build_activation_layer(act_cfg) - - layers = [] - in_channels = embed_dims - for _ in range(num_fcs - 1): - layers.append( - Sequential( - Linear(in_channels, feedforward_channels), self.activate, - nn.Dropout(ffn_drop))) - in_channels = feedforward_channels - layers.append(Linear(feedforward_channels, embed_dims)) - layers.append(nn.Dropout(ffn_drop)) - self.layers = Sequential(*layers) - self.dropout_layer = build_dropout( - dropout_layer) if dropout_layer else torch.nn.Identity() - self.add_identity = add_identity - - @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN') - def forward(self, x, identity=None): - """Forward function for `FFN`. - - The function would add x to the output tensor if residue is None. 
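Stripped of the registry and config machinery, the forward pass above adds positional encodings to query and key, runs `torch.nn.MultiheadAttention`, and returns the result through an identity (residual) connection. A standalone sketch of that data flow with plain PyTorch, illustrating the pattern rather than the mmcv API itself:

```python
import torch
from torch import nn

embed_dims, num_heads = 256, 8
attn = nn.MultiheadAttention(embed_dims, num_heads)   # expects (seq, batch, embed) by default

query = torch.randn(10, 2, embed_dims)                # num_queries=10, batch=2
query_pos = torch.randn(10, 2, embed_dims)

identity = query
q = k = query + query_pos      # key defaults to query and key_pos to query_pos, as in the wrapper
v = query                      # value stays without positional encoding
out, _ = attn(q, k, v)
out = identity + out           # identity connection (projection dropout omitted for brevity)
print(out.shape)               # torch.Size([10, 2, 256])
```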
- """ - out = self.layers(x) - if not self.add_identity: - return self.dropout_layer(out) - if identity is None: - identity = x - return identity + self.dropout_layer(out) - - -@TRANSFORMER_LAYER.register_module() -class BaseTransformerLayer(BaseModule): - """Base `TransformerLayer` for vision transformer. - - It can be built from `mmcv.ConfigDict` and support more flexible - customization, for example, using any number of `FFN or LN ` and - use different kinds of `attention` by specifying a list of `ConfigDict` - named `attn_cfgs`. It is worth mentioning that it supports `prenorm` - when you specifying `norm` as the first element of `operation_order`. - More details about the `prenorm`: `On Layer Normalization in the - Transformer Architecture `_ . - - Args: - attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): - Configs for `self_attention` or `cross_attention` modules, - The order of the configs in the list should be consistent with - corresponding attentions in operation_order. - If it is a dict, all of the attention modules in operation_order - will be built with this config. Default: None. - ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): - Configs for FFN, The order of the configs in the list should be - consistent with corresponding ffn in operation_order. - If it is a dict, all of the attention modules in operation_order - will be built with this config. - operation_order (tuple[str]): The execution order of operation - in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). - Support `prenorm` when you specifying first element as `norm`. - Default:None. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - batch_first (bool): Key, Query and Value are shape - of (batch, n, embed_dim) - or (n, batch, embed_dim). Default to False. - """ - - def __init__(self, - attn_cfgs=None, - ffn_cfgs=dict( - type='FFN', - embed_dims=256, - feedforward_channels=1024, - num_fcs=2, - ffn_drop=0., - act_cfg=dict(type='ReLU', inplace=True), - ), - operation_order=None, - norm_cfg=dict(type='LN'), - init_cfg=None, - batch_first=False, - **kwargs): - - deprecated_args = dict( - feedforward_channels='feedforward_channels', - ffn_dropout='ffn_drop', - ffn_num_fcs='num_fcs') - for ori_name, new_name in deprecated_args.items(): - if ori_name in kwargs: - warnings.warn( - f'The arguments `{ori_name}` in BaseTransformerLayer ' - f'has been deprecated, now you should set `{new_name}` ' - f'and other FFN related arguments ' - f'to a dict named `ffn_cfgs`. ') - ffn_cfgs[new_name] = kwargs[ori_name] - - super(BaseTransformerLayer, self).__init__(init_cfg) - - self.batch_first = batch_first - - assert set(operation_order) & set( - ['self_attn', 'norm', 'ffn', 'cross_attn']) == \ - set(operation_order), f'The operation_order of' \ - f' {self.__class__.__name__} should ' \ - f'contains all four operation type ' \ - f"{['self_attn', 'norm', 'ffn', 'cross_attn']}" - - num_attn = operation_order.count('self_attn') + operation_order.count( - 'cross_attn') - if isinstance(attn_cfgs, dict): - attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)] - else: - assert num_attn == len(attn_cfgs), f'The length ' \ - f'of attn_cfg {num_attn} is ' \ - f'not consistent with the number of attention' \ - f'in operation_order {operation_order}.' 
- - self.num_attn = num_attn - self.operation_order = operation_order - self.norm_cfg = norm_cfg - self.pre_norm = operation_order[0] == 'norm' - self.attentions = ModuleList() - - index = 0 - for operation_name in operation_order: - if operation_name in ['self_attn', 'cross_attn']: - if 'batch_first' in attn_cfgs[index]: - assert self.batch_first == attn_cfgs[index]['batch_first'] - else: - attn_cfgs[index]['batch_first'] = self.batch_first - attention = build_attention(attn_cfgs[index]) - # Some custom attentions used as `self_attn` - # or `cross_attn` can have different behavior. - attention.operation_name = operation_name - self.attentions.append(attention) - index += 1 - - self.embed_dims = self.attentions[0].embed_dims - - self.ffns = ModuleList() - num_ffns = operation_order.count('ffn') - if isinstance(ffn_cfgs, dict): - ffn_cfgs = ConfigDict(ffn_cfgs) - if isinstance(ffn_cfgs, dict): - ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)] - assert len(ffn_cfgs) == num_ffns - for ffn_index in range(num_ffns): - if 'embed_dims' not in ffn_cfgs[ffn_index]: - ffn_cfgs['embed_dims'] = self.embed_dims - else: - assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims - self.ffns.append( - build_feedforward_network(ffn_cfgs[ffn_index], - dict(type='FFN'))) - - self.norms = ModuleList() - num_norms = operation_order.count('norm') - for _ in range(num_norms): - self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1]) - - def forward(self, - query, - key=None, - value=None, - query_pos=None, - key_pos=None, - attn_masks=None, - query_key_padding_mask=None, - key_padding_mask=None, - **kwargs): - """Forward function for `TransformerDecoderLayer`. - - **kwargs contains some specific arguments of attentions. - - Args: - query (Tensor): The input query with shape - [num_queries, bs, embed_dims] if - self.batch_first is False, else - [bs, num_queries embed_dims]. - key (Tensor): The key tensor with shape [num_keys, bs, - embed_dims] if self.batch_first is False, else - [bs, num_keys, embed_dims] . - value (Tensor): The value tensor with same shape as `key`. - query_pos (Tensor): The positional encoding for `query`. - Default: None. - key_pos (Tensor): The positional encoding for `key`. - Default: None. - attn_masks (List[Tensor] | None): 2D Tensor used in - calculation of corresponding attention. The length of - it should equal to the number of `attention` in - `operation_order`. Default: None. - query_key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_queries]. Only used in `self_attn` layer. - Defaults to None. - key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_keys]. Default: None. - - Returns: - Tensor: forwarded results with shape [num_queries, bs, embed_dims]. 
- """ - - norm_index = 0 - attn_index = 0 - ffn_index = 0 - identity = query - if attn_masks is None: - attn_masks = [None for _ in range(self.num_attn)] - elif isinstance(attn_masks, torch.Tensor): - attn_masks = [ - copy.deepcopy(attn_masks) for _ in range(self.num_attn) - ] - warnings.warn(f'Use same attn_mask in all attentions in ' - f'{self.__class__.__name__} ') - else: - assert len(attn_masks) == self.num_attn, f'The length of ' \ - f'attn_masks {len(attn_masks)} must be equal ' \ - f'to the number of attention in ' \ - f'operation_order {self.num_attn}' - - for layer in self.operation_order: - if layer == 'self_attn': - temp_key = temp_value = query - query = self.attentions[attn_index]( - query, - temp_key, - temp_value, - identity if self.pre_norm else None, - query_pos=query_pos, - key_pos=query_pos, - attn_mask=attn_masks[attn_index], - key_padding_mask=query_key_padding_mask, - **kwargs) - attn_index += 1 - identity = query - - elif layer == 'norm': - query = self.norms[norm_index](query) - norm_index += 1 - - elif layer == 'cross_attn': - query = self.attentions[attn_index]( - query, - key, - value, - identity if self.pre_norm else None, - query_pos=query_pos, - key_pos=key_pos, - attn_mask=attn_masks[attn_index], - key_padding_mask=key_padding_mask, - **kwargs) - attn_index += 1 - identity = query - - elif layer == 'ffn': - query = self.ffns[ffn_index]( - query, identity if self.pre_norm else None) - ffn_index += 1 - - return query - - -@TRANSFORMER_LAYER_SEQUENCE.register_module() -class TransformerLayerSequence(BaseModule): - """Base class for TransformerEncoder and TransformerDecoder in vision - transformer. - - As base-class of Encoder and Decoder in vision transformer. - Support customization such as specifying different kind - of `transformer_layer` in `transformer_coder`. - - Args: - transformerlayer (list[obj:`mmcv.ConfigDict`] | - obj:`mmcv.ConfigDict`): Config of transformerlayer - in TransformerCoder. If it is obj:`mmcv.ConfigDict`, - it would be repeated `num_layer` times to a - list[`mmcv.ConfigDict`]. Default: None. - num_layers (int): The number of `TransformerLayer`. Default: None. - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - """ - - def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None): - super(TransformerLayerSequence, self).__init__(init_cfg) - if isinstance(transformerlayers, dict): - transformerlayers = [ - copy.deepcopy(transformerlayers) for _ in range(num_layers) - ] - else: - assert isinstance(transformerlayers, list) and \ - len(transformerlayers) == num_layers - self.num_layers = num_layers - self.layers = ModuleList() - for i in range(num_layers): - self.layers.append(build_transformer_layer(transformerlayers[i])) - self.embed_dims = self.layers[0].embed_dims - self.pre_norm = self.layers[0].pre_norm - - def forward(self, - query, - key, - value, - query_pos=None, - key_pos=None, - attn_masks=None, - query_key_padding_mask=None, - key_padding_mask=None, - **kwargs): - """Forward function for `TransformerCoder`. - - Args: - query (Tensor): Input query with shape - `(num_queries, bs, embed_dims)`. - key (Tensor): The key tensor with shape - `(num_keys, bs, embed_dims)`. - value (Tensor): The value tensor with shape - `(num_keys, bs, embed_dims)`. - query_pos (Tensor): The positional encoding for `query`. - Default: None. - key_pos (Tensor): The positional encoding for `key`. - Default: None. 
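`BaseTransformerLayer` and `TransformerLayerSequence` above are meant to be assembled from config dicts through the mmcv registries. A hedged config sketch for a small two-layer encoder, assuming the builders are importable from the same module path as the deleted file:

```python
# Assumed import path: the vendored copy above lives in annotator/uniformer/mmcv/cnn/bricks/transformer.py.
from annotator.uniformer.mmcv.cnn.bricks.transformer import build_transformer_layer_sequence

encoder_cfg = dict(
    type='TransformerLayerSequence',
    num_layers=2,
    transformerlayers=dict(
        type='BaseTransformerLayer',
        attn_cfgs=dict(type='MultiheadAttention', embed_dims=256, num_heads=8),
        ffn_cfgs=dict(type='FFN', embed_dims=256, feedforward_channels=1024),
        # operation_order must be a subset of {'self_attn', 'norm', 'ffn', 'cross_attn'}.
        operation_order=('self_attn', 'norm', 'ffn', 'norm'),
    ),
)
encoder = build_transformer_layer_sequence(encoder_cfg)
```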
- attn_masks (List[Tensor], optional): Each element is 2D Tensor - which is used in calculation of corresponding attention in - operation_order. Default: None. - query_key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_queries]. Only used in self-attention - Default: None. - key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_keys]. Default: None. - - Returns: - Tensor: results with shape [num_queries, bs, embed_dims]. - """ - for layer in self.layers: - query = layer( - query, - key, - value, - query_pos=query_pos, - key_pos=key_pos, - attn_masks=attn_masks, - query_key_padding_mask=query_key_padding_mask, - key_padding_mask=key_padding_mask, - **kwargs) - return query diff --git a/spaces/TNR-5/zeroscope/text2video.py b/spaces/TNR-5/zeroscope/text2video.py deleted file mode 100644 index a9b23acaf8a74db48937e0ca3f7714e1c2e02df8..0000000000000000000000000000000000000000 --- a/spaces/TNR-5/zeroscope/text2video.py +++ /dev/null @@ -1,154 +0,0 @@ -import gradio as gr -from share_btn import community_icon_html, loading_icon_html, share_js -import torch -from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler -from diffusers.utils import export_to_video - -pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) -pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) -pipe.enable_model_cpu_offload() - -def infer(prompt): - negative_prompt = "text, watermark, copyright, blurry, nsfw" - video_frames = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames - video_path = export_to_video(video_frames) - print(video_path) - return video_path, gr.Group.update(visible=True) - -css = """ -#col-container {max-width: 510px; margin-left: auto; margin-right: auto;} -a {text-decoration-line: underline; font-weight: 600;} -.animate-spin { - animation: spin 1s linear infinite; -} - -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} - -#share-btn-container { - display: flex; - padding-left: 0.5rem !important; - padding-right: 0.5rem !important; - background-color: #000000; - justify-content: center; - align-items: center; - border-radius: 9999px !important; - max-width: 13rem; -} - -#share-btn-container:hover { - background-color: #060606; -} - -#share-btn { - all: initial; - color: #ffffff; - font-weight: 600; - cursor:pointer; - font-family: 'IBM Plex Sans', sans-serif; - margin-left: 0.5rem !important; - padding-top: 0.5rem !important; - padding-bottom: 0.5rem !important; - right:0; -} - -#share-btn * { - all: unset; -} - -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} - -#share-btn-container .wrap { - display: none !important; -} - -#share-btn-container.hidden { - display: none!important; -} -img[src*='#center'] { - display: block; - margin: auto; -} - -.footer { - margin-bottom: 45px; - margin-top: 10px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } -""" - -with gr.Blocks(css=css) as demo: - with gr.Column(elem_id="col-container"): - gr.Markdown( - """ -

      HedgehogAI

      -

      - This is a demo version of HedgehogAI, a neural network for fast text-to-video generation, developed by the CofAI team!
      -

      - - [![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg#center)](https://huggingface.co/spaces/CofAI/hedgehog?duplicate=true) - - """ - ) - - prompt_in = gr.Textbox(label="Prompt", placeholder="Darth Vader is surfing on waves", elem_id="prompt-in") - #neg_prompt = gr.Textbox(label="Negative prompt", value="text, watermark, copyright, blurry, nsfw", elem_id="neg-prompt-in") - #inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, step=1, value=40, interactive=False) - submit_btn = gr.Button("Submit") - video_result = gr.Video(label="Video Output", elem_id="video-output") - - with gr.Group(elem_id="share-btn-container", visible=False) as share_group: - community_icon = gr.HTML(community_icon_html) - loading_icon = gr.HTML(loading_icon_html) - share_button = gr.Button("Share to community", elem_id="share-btn") - - gr.HTML(""" - -
      -

      Powered by:

      -
      - - - - - -
      -
      - """) - - submit_btn.click(fn=infer, - inputs=[prompt_in], - outputs=[video_result, share_group]) - - share_button.click(None, [], [], _js=share_js) - -demo.queue(max_size=12).launch() - \ No newline at end of file diff --git a/spaces/TRI-ML/risk_biased_prediction/risk_biased/config/planning_config.py b/spaces/TRI-ML/risk_biased_prediction/risk_biased/config/planning_config.py deleted file mode 100644 index 96548fca5a08b6027a68e66b1785c1c6ce605fd5..0000000000000000000000000000000000000000 --- a/spaces/TRI-ML/risk_biased_prediction/risk_biased/config/planning_config.py +++ /dev/null @@ -1,13 +0,0 @@ -# Tracking Cost Parameters -tracking_cost_scale_longitudinal = 1e-2 -tracking_cost_scale_lateral = 1e-0 -tracking_cost_reduce = "mean" - -# Cross Entropy Solver Parameters -num_control_samples = 100 -num_elite = 30 -iter_max = 10 -smoothing_factor = 0.2 -mean_warm_start = True -acceleration_std_x_m_s2 = 2.0 -acceleration_std_y_m_s2 = 0.0 diff --git a/spaces/TRI-ML/risk_biased_prediction/scripts/eval_scripts/plot_latent_travel_distance.py b/spaces/TRI-ML/risk_biased_prediction/scripts/eval_scripts/plot_latent_travel_distance.py deleted file mode 100644 index 1d55481fd9c4da67c03f8be512beb29adb6e542d..0000000000000000000000000000000000000000 --- a/spaces/TRI-ML/risk_biased_prediction/scripts/eval_scripts/plot_latent_travel_distance.py +++ /dev/null @@ -1,157 +0,0 @@ -# Lloyd algorithm while estimating average cost? - -import os - -import matplotlib.pyplot as plt -import matplotlib -import numpy as np -from pytorch_lightning.utilities.seed import seed_everything - -# from scipy.cluster.vq import kmeans2 -# from scipy.spatial import voronoi_plot_2d, Voronoi -import torch -from torch.utils.data import DataLoader - -from risk_biased.scene_dataset.loaders import SceneDataLoaders -from risk_biased.scene_dataset.scene import RandomSceneParams - -# from risk_biased.scene_dataset.scene_plotter import ScenePlotter -from risk_biased.utils.callbacks import DrawCallbackParams -from risk_biased.utils.config_argparse import config_argparse - -from risk_biased.utils.load_model import load_from_config - - -def draw_travel_distance_map( - model: torch.nn.Module, - selected_agent: int, - loader: DataLoader, - sqrt_n_samples: int, - params: DrawCallbackParams, -): - n_samples = sqrt_n_samples**2 - ( - normalized_input, - mask_input, - fut, - mask_fut, - mask_loss, - map, - mask_map, - offset, - ego_past, - ego_fut, - ) = next(iter(loader)) - - ego_traj = torch.cat((ego_past, ego_fut), dim=2) - n_scenes, n_agents, n_steps, features = normalized_input.shape - input_traj = SceneDataLoaders.unnormalize_trajectory(normalized_input, offset) - - # prior_samples = torch.rand(ped_trajs.shape[0], n_samples, 2)*6 - 3 - x = np.linspace(-3, 3, sqrt_n_samples) - y = np.linspace(-3, 3, sqrt_n_samples) - xx, yy = np.meshgrid(x, y) - - # Warning: if n_agents>1 the combinations of latent samples are not tested, this is not exploring all the possibilities. 
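planning_config.py above parameterizes a cross-entropy solver: sample `num_control_samples` control sequences, keep the `num_elite` lowest-cost ones, and blend their statistics into the sampling distribution with `smoothing_factor`, repeating for `iter_max` iterations. A generic numpy sketch of that loop using the config's parameter names, with a placeholder cost; the actual solver and tracking cost live elsewhere in the repo.

```python
import numpy as np

# Parameter names taken from planning_config.py above; the cost below is only a placeholder.
num_control_samples, num_elite, iter_max, smoothing_factor = 100, 30, 10, 0.2
horizon = 20                                            # assumed planning horizon
mean = np.zeros((horizon, 2))                           # per-step (ax, ay) accelerations
std = np.ones((horizon, 2)) * np.array([2.0, 0.0])      # acceleration_std_{x,y}_m_s2

def cost(controls):
    # Placeholder tracking cost: penalize large accelerations.
    return np.square(controls).sum(axis=(1, 2))

for _ in range(iter_max):
    samples = mean + std * np.random.randn(num_control_samples, horizon, 2)
    elites = samples[np.argsort(cost(samples))[:num_elite]]
    # Blend elite statistics into the sampling distribution (cross-entropy update with smoothing).
    mean = (1 - smoothing_factor) * elites.mean(axis=0) + smoothing_factor * mean
    std = (1 - smoothing_factor) * elites.std(axis=0) + smoothing_factor * std

print(mean.shape, float(cost(mean[None])[0]))
```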
- prior_samples = ( - torch.from_numpy(np.stack((xx, yy), -1).astype("float32")) - .view(1, 1, n_samples, 2) - .repeat(n_scenes, n_agents, 1, 1) - ) - - mask_z = torch.ones_like(prior_samples[..., 0, 0]) - y = model.decode( - z_samples=prior_samples, - mask_z=mask_z, - x=normalized_input, - mask_x=mask_input, - map=map, - mask_map=mask_map, - offset=offset, - ) - - generated_trajs = ( - SceneDataLoaders.unnormalize_trajectory( - y, - offset, - ) - .cpu() - .detach() - .numpy() - ) - - # fig, ax = plt.subplots() - # plotter = ScenePlotter(scene, ax=ax) - # time = params.scene_params.sample_times[params.num_steps - 1] - # ind = 0 - # plotter.draw_scene(ind, time=time) - # plotter.draw_trajectory(input_traj[ind]) - # plotter.draw_all_trajectories(generated_trajs, color="r") - # plt.show() - - input_traj = np.repeat( - input_traj.reshape((n_scenes, n_agents, 1, params.num_steps, features)), - n_samples, - axis=2, - ) - - generated_ped_trajs = np.concatenate((input_traj, generated_trajs), axis=3) - - travel_distances = np.sqrt( - np.square( - generated_ped_trajs[:, :, :, -1] - generated_ped_trajs[:, :, :, 0] - ).sum(-1) - ) - - travel_distances = ( - travel_distances[:, selected_agent] - .reshape(n_scenes, sqrt_n_samples, sqrt_n_samples) - .mean(0) - ) - cmap = plt.get_cmap("RdBu_r") - vmin = params.scene_params.time_scene * params.scene_params.slow_speed - vmax = params.scene_params.time_scene * params.scene_params.fast_speed - plt.contourf( - xx, - yy, - travel_distances, - 50, - cmap=cmap, - extent=(-3, 3, -3, 3), - vmin=vmin, - vmax=vmax, - ) - norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=True) - sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) - plt.colorbar(sm, label="Travel distance") - plt.axis([-3, 3, -3, 3]) - plt.show() - - -if __name__ == "__main__": - # Draws a map, in the latent space, of travel distances averaged on a batch of input trajectories. 
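The warning above notes that repeating the same latent grid for every agent does not explore cross-agent combinations. If that were needed, the joint grid could be enumerated explicitly, at the price of growing as `n_samples ** n_agents`; a small sketch of the idea (not what the evaluation script does):

```python
import itertools
import numpy as np

n_agents, sqrt_n = 2, 3
axis = np.linspace(-3, 3, sqrt_n)
per_agent_grid = np.stack(np.meshgrid(axis, axis), -1).reshape(-1, 2)   # (9, 2) latent points

# Every joint assignment of a latent point to each agent.
joint = np.array(list(itertools.product(per_agent_grid, repeat=n_agents)))
print(joint.shape)   # (81, 2, 2) = (n_samples ** n_agents, n_agents, latent_dim)
```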
- working_dir = os.path.dirname(os.path.realpath(__file__)) - config_path = os.path.join( - working_dir, "..", "..", "risk_biased", "config", "learning_config.py" - ) - cfg = config_argparse(config_path) - - cfg.batch_size = 128 - model, loaders, cfg = load_from_config(cfg) - assert ( - cfg.latent_dim == 2 - and "The latent dimension of the model must be exactly 2 to be plotted (no dimensionality reduction capabilities)" - ) - scene_params = RandomSceneParams.from_config(cfg) - draw_params = DrawCallbackParams.from_config(cfg) - if cfg.seed is not None: - seed_everything(cfg.seed) - - sqrt_n_samples = 20 - draw_travel_distance_map( - model.model, - 0, - loaders.val_dataloader(), - sqrt_n_samples, - draw_params, - ) diff --git a/spaces/TRI-ML/risk_biased_prediction/scripts/scripts_utils/generate_dataset_waymo.py b/spaces/TRI-ML/risk_biased_prediction/scripts/scripts_utils/generate_dataset_waymo.py deleted file mode 100644 index b56232d07bedfcee734f546153a1c03325edef07..0000000000000000000000000000000000000000 --- a/spaces/TRI-ML/risk_biased_prediction/scripts/scripts_utils/generate_dataset_waymo.py +++ /dev/null @@ -1,589 +0,0 @@ -import concurrent.futures -from concurrent.futures import ProcessPoolExecutor -import math -import os - -import fire -import numpy as np -import pickle -import tensorflow as tf -from tqdm import tqdm - -from waymo_open_dataset.protos import scenario_pb2 - - -def scalar_to_one_hot(length, index, has_zero=False): - if has_zero: - offset = 1 - else: - offset = 0 - assert 0 <= index < length + offset - if index + 1 - offset > 0: - one_hot_type = np.eye(length)[index - offset] - else: - one_hot_type = np.zeros(length) - - return one_hot_type - - -def group_tracks(tracks): - object_types = { - "TYPE_UNSET": 0, - "TYPE_VEHICLE": 1, - "TYPE_PEDESTRIAN": 2, - "TYPE_CYCLIST": 3, - "TYPE_OTHER": 4, - } - state_size = 11 - traj = np.zeros((len(tracks), len(tracks[0].states), state_size)) - mask_traj = np.zeros((len(tracks), len(tracks[0].states)), dtype=bool) - traj_type = np.zeros((len(tracks), len(object_types) - 1)) - id_to_idx = {} - - for i_track, track in enumerate(tracks): - traj_type[i_track, :] = scalar_to_one_hot( - len(object_types) - 1, track.object_type, has_zero=True - ) - id_to_idx[track.id] = i_track - for i_time, state in enumerate(track.states): - if state.valid: - traj[i_track, i_time, 0] = state.center_x - traj[i_track, i_time, 1] = state.center_y - traj[i_track, i_time, 2] = state.heading - traj[i_track, i_time, 3] = state.velocity_x - traj[i_track, i_time, 4] = state.velocity_y - traj[i_track, i_time, 5] = state.width - traj[i_track, i_time, 6] = state.length - traj[i_track, i_time, 7:11] = traj_type[i_track, :] - mask_traj[i_track, i_time] = state.center_x != 0 or state.center_y != 0 - else: - mask_traj[i_track, i_time] = False - - # Remove trajectories that are masked for the whole time - mask_any_time = mask_traj.any(-1) - to_delete = [] - for key, value in id_to_idx.items(): - if not mask_any_time[value]: - to_delete.append(key) - else: - id_to_idx[key] = np.sum(mask_any_time[:value]) - for key in to_delete: - del id_to_idx[key] - traj = traj[mask_any_time] - traj_type = traj_type[mask_any_time] - mask_traj = mask_traj[mask_any_time] - # traj:(n_agents, seq_time, features), mask:(n_agents, seq_time), traj_type:(n_agents, features) - assert (traj[..., :2][mask_traj] != 0).any(-1).all() - return traj, mask_traj, traj_type, id_to_idx - - -def filter_tracks( - pos, - trajs, - mask_trajs, - trajs_type, - to_predict, - id_to_idx, - mask_keep, - 
max_moving_distance, - max_static_distance, -): - distances2 = ((trajs[:, :, :2] - pos[None, None, :]) ** 2).sum(-1).min(1) - first_non_0_pos = np.take_along_axis( - trajs, np.argmax(mask_trajs, 1)[:, None, None], axis=1 - ) - is_moving = ( - np.abs((trajs[:, :, :2] - first_non_0_pos[:, 0:1, :2]) * mask_trajs[:, :, None]) - .sum(1) - .sum(1) - > 1 - ) - filtered = np.zeros_like(distances2, dtype=bool) - filtered[is_moving] = distances2[is_moving] < max_moving_distance**2 - filtered[np.logical_not(is_moving)] = ( - distances2[np.logical_not(is_moving)] < max_static_distance**2 - ) - filtered = np.logical_or(filtered, mask_keep) - - # Filter out trajectories - to_delete = [] - idx_to_id = {} - for key, value in id_to_idx.items(): - if not filtered[value]: - to_delete.append(key) - else: - new_value = np.sum(filtered[:value]) - idx_to_id[new_value] = key - id_to_idx[key] = new_value - for key in to_delete: - del id_to_idx[key] - - trajs = trajs[filtered] - trajs_type = trajs_type[filtered] - mask_trajs = mask_trajs[filtered] - to_predict = to_predict[filtered] - - if mask_keep.all(): - return trajs, mask_trajs, trajs_type, to_predict, id_to_idx - - # Sort entries from closest to furthest to input pos - distances2 = distances2[filtered] - distance_sort = np.argsort(distances2) - copy_trajs = trajs.copy() - copy_mask_trajs = mask_trajs.copy() - copy_trajs_type = trajs_type.copy() - copy_to_predict = to_predict.copy() - skip = np.argmin(mask_keep) - assert skip > 1 - offset = skip - for i, idx in enumerate(distance_sort[skip:]): - if idx > skip: - ii = i + offset - trajs[ii] = copy_trajs[idx] - trajs_type[ii] = copy_trajs_type[idx] - mask_trajs[ii] = copy_mask_trajs[idx] - to_predict[ii] = copy_to_predict[idx] - id_to_idx[idx_to_id[idx]] = ii - else: - offset -= 1 - assert (trajs[..., :2][mask_trajs] != 0).any(-1).all() - return trajs, mask_trajs, trajs_type, to_predict, id_to_idx - - -def cut_lane(lane, pos, max_len): - center_idx = np.argmin(((lane - pos[None, :]) ** 2).sum(-1)) - start = max(0, center_idx - max_len // 2) - return lane[start : start + max_len, :] - - -def group_lanes(map, center, max_lane_len, max_lane_distance): - all_objects = [] - all_types = [] - max_len = 0 - id_to_idx = {} - stride = 2 - max_lane_len = max_lane_len * stride - for object in map: - # Type one_hot encoding is as follows: 0: lane, 1: stop_sign, 2: cross_walk, 3: speed_bump - lane = object.lane.polyline - is_cut_lane = len(lane) > max_lane_len - len_lane = min(len(lane), max_lane_len) - len_cross_walk = len(object.crosswalk.polygon) - len_speed_bump = len(object.speed_bump.polygon) - num_obj_types = 4 - - max_len = max(max_len, len_lane) - max_len = max(max_len, len_cross_walk) - max_len = max(max_len, len_speed_bump) - if len_lane > 0: - current_lane = np.zeros((len(lane), 2)) - for i_point, cw in enumerate(lane): - current_lane[i_point, 0] = cw.x - current_lane[i_point, 1] = cw.y - if is_cut_lane: - current_lane = cut_lane(current_lane, center, max_lane_len) - min_distance2 = np.min(((current_lane - center[None, :]) ** 2).sum(-1)) - if min_distance2 < max_lane_distance**2: - id_to_idx[object.id] = len(all_objects) - all_objects.append(current_lane) - all_types.append(scalar_to_one_hot(num_obj_types, 0)) - # elif len_cross_walk > 0: - # current_cross_walk = np.zeros((len_cross_walk, 2)) - # for i_point, cw in enumerate(object.crosswalk.polygon): - # current_cross_walk[i_point, 0] = cw.x - # current_cross_walk[i_point, 1] = cw.y - # all_objects.append(current_cross_walk) - # 
all_types.append(scalar_to_one_hot(num_obj_types, 2)) - # elif len_speed_bump > 0: - # current_speed_bump = np.zeros((len_speed_bump, 2)) - # for i_point, cw in enumerate(object.speed_bump.polygon): - # current_speed_bump[i_point, 0] = cw.x - # current_speed_bump[i_point, 1] = cw.y - # all_objects.append(current_speed_bump) - # all_types.append(scalar_to_one_hot(num_obj_types, 3)) - # elif not (object.stop_sign.position.x == 0 and object.stop_sign.position.y == 0): - # all_objects.append([np.array([object.stop_sign.position.x, object.stop_sign.position.y])]) - # all_types.append(scalar_to_one_hot(num_obj_types, 1)) - - object_array = np.zeros((len(all_objects), (max_len + 1) // stride, 2)) - mask_object_array = np.zeros( - (len(all_objects), (max_len + 1) // stride), dtype=bool - ) - object_types_array = np.zeros((len(all_types), num_obj_types)) - - for i_object, object in enumerate(all_objects): - len_object = (len(object) + 1) // stride - object_array[i_object, :len_object, :] = object[::2] - mask_object_array[i_object, :len_object] = True - object_types_array[i_object] = all_types[i_object] - # for i, lane in enumerate(object_array): - # plt.plot(lane[mask_object_array[i, :], 0], lane[mask_object_array[i, :], 1], alpha=0.3) - - idx_to_id = {value: key for key, value in id_to_idx.items()} - # Sort entries from closest to furthest to input center - distances2 = np.min(((object_array - center[None, None, :]) ** 2).sum(-1), 1) - distance_sort = np.argsort(distances2) - copy_object = object_array.copy() - copy_mask_object = mask_object_array.copy() - copy_type = object_types_array.copy() - for i, idx in enumerate(distance_sort): - object_array[i] = copy_object[idx] - mask_object_array[i] = copy_mask_object[idx] - object_types_array[i] = copy_type[idx] - id_to_idx[idx_to_id[idx]] = i - - return object_array, mask_object_array, object_types_array, id_to_idx - - -def group_light_signals(light_signals, id_to_idx, n_map_objects): - state_to_idx = { - "TRAFFIC_LIGHT_STATE_UNKNOWN": 0, - "TRAFFIC_LIGHT_STATE_ARROW_STOP": 1, - "TRAFFIC_LIGHT_STATE_ARROW_CAUTION": 2, - "TRAFFIC_LIGHT_STATE_ARROW_GO": 3, - "TRAFFIC_LIGHT_STATE_STOP": 4, - "TRAFFIC_LIGHT_STATE_CAUTION": 5, - "TRAFFIC_LIGHT_STATE_GO": 6, - "TRAFFIC_LIGHT_STATE_FLASHING_STOP": 7, - "TRAFFIC_LIGHT_STATE_FLASHING_CAUTION": 8, - } - len_time = len(light_signals) - all_lanes_states = np.zeros((n_map_objects, len_time, len(state_to_idx) - 1)) - for t, lanes_states in enumerate(light_signals): - for lane in lanes_states.lane_states: - if lane.lane in id_to_idx.keys(): - all_lanes_states[id_to_idx[lane.lane], t, :] = scalar_to_one_hot( - len(state_to_idx) - 1, lane.state, True - ) - - # (n_objects, seq_time, features) - return all_lanes_states - - -def normalize_all(traj, map, pos, angle): - - c = math.cos(angle) - s = math.sin(angle) - rotation_mat = np.array([[c, s], [-s, c]]) - traj_clone = traj.clone() - traj_clone[..., :2] = ( - traj_clone[..., :2] - pos.reshape(([1] * (traj.ndim - 1)) + [2]) - ) @ rotation_mat - traj_clone[..., 2] = (traj_clone[..., 2] + angle + np.pi) % (2 * np.pi) - np.pi - if traj.shape[-1] >= 5: - traj_clone[..., 3:5] = traj_clone[..., 3:5] @ rotation_mat - map_clone = (map.clone() - pos.reshape(([1] * (map.ndim - 1)) + [2])) @ rotation_mat - - return traj_clone, map_clone - - -def fill_gaps(trajs, mask_in): - """ - If trajectories are partially observed with gaps (observed then not then observed again), fill the gaps with interpolations. 
- - Args: - - trajs: size (n_agents, time, features) features are organized as [x, y, angle, vx, vy, other_features ] - - """ - mask = mask_in.copy() - first_non_zeros = np.argmax(mask, 1) - last_non_zeros = mask.shape[1] - np.argmax(np.flip(mask, 1), 1) - has_gaps = np.logical_and( - last_non_zeros - first_non_zeros > np.maximum(mask.sum(1), 1), mask.sum(1) > 1 - ) - if not has_gaps.any(): - # No gap to fill, returning the input - return trajs - # iterate over agents - for i in range(trajs.shape[0]): - if has_gaps[i]: - left = first_non_zeros[i] - right = first_non_zeros[i] - for t in range(first_non_zeros[i], last_non_zeros[i]): - if mask[i, t] and left == right: - left += 1 - elif mask[i, t]: - break - else: - mask[i, t] = True - right += 1 - # Linear filling for positions: - trajs[i, left:right, :2] = (np.arange(right - left) / (right - left))[ - :, None - ] * (trajs[i, right, :2] - trajs[i, left - 1, :2])[None, :] + trajs[ - i, left - 1 : left, :2 - ] - # Linear filling for velocities and the rest: - trajs[i, left:right, 3:] = (np.arange(right - left) / (right - left))[ - :, None - ] * (trajs[i, right, 3:] - trajs[i, left - 1, 3:])[None, :] + trajs[ - i, left - 1 : left, 3: - ] - # Linear filling for angles (periodicity doesn't allow direct interpolation): - cos_traj = np.cos(trajs[i, left - 1 : right + 1, 2]) - sin_traj = np.sin(trajs[i, left - 1 : right + 1, 2]) - cos_traj = (np.arange(right - left) / (right - left)) * ( - cos_traj[-1] - cos_traj[0] - ) + cos_traj[0] - sin_traj = (np.arange(right - left) / (right - left)) * ( - sin_traj[-1] - sin_traj[0] - ) + sin_traj[0] - trajs[i, left:right, 2] = np.arctan2(sin_traj, cos_traj) - # Only the first gap was filled, recursive call to complete others - return fill_gaps(trajs, mask) - - -def group_scenario(scenario): - ids_of_interest = list(set(scenario.objects_of_interest)) - - # Only gather scenario with a pair of interacting vehicles - if len(ids_of_interest) != 2: - return None - - traj, mask_traj, traj_type, id_to_idx = group_tracks(scenario.tracks) - assert (traj[..., :2][mask_traj] != 0).any(-1).all() - - to_predict = np.zeros(traj.shape[0], dtype=bool) - for idx in scenario.tracks_to_predict: - to_predict[idx.track_index] = True - - # # Set ego as the first agent in the list of trajectories - # index_ego = scenario.sdc_track_index - # if index_ego != 0: - # for key, value in id_to_idx.items(): - # if value == 0: - # id_0 = key - # traj[[0, index_ego]] = traj[[index_ego, 0]] - # mask_traj[[0, index_ego]] = mask_traj[[index_ego, 0]] - # traj_type[[0, index_ego]] = traj_type[[index_ego, 0]] - # to_predict[[0, index_ego]] = to_predict[[index_ego, 0]] - # id_to_idx[id_0] = index_ego - # id_to_idx[scenario.sdc_track_index] = 0 - - # Set the agents of interest as the first agents in the list of trajectories - for key, value in id_to_idx.items(): - if value == 0: - id_0 = key - elif value == 1: - id_1 = key - indices_of_interest = sorted( - [id_to_idx[ids_of_interest[0]], id_to_idx[ids_of_interest[1]]] - ) - traj[[0, indices_of_interest[0]]] = traj[ - [ - indices_of_interest[0], - 0, - ] - ] - mask_traj[[0, indices_of_interest[0]]] = mask_traj[ - [ - indices_of_interest[0], - 0, - ] - ] - traj_type[[0, indices_of_interest[0]]] = traj_type[ - [ - indices_of_interest[0], - 0, - ] - ] - to_predict[[0, indices_of_interest[0]]] = to_predict[ - [ - indices_of_interest[0], - 0, - ] - ] - traj[[1, indices_of_interest[1]]] = traj[[indices_of_interest[1], 1]] - mask_traj[[1, indices_of_interest[1]]] = mask_traj[[indices_of_interest[1], 1]] - 
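`fill_gaps` above interpolates headings through their sine and cosine and recombines them with `arctan2`, which avoids the wrap-around problem of interpolating angles directly (for example between 179° and -179°, which are only 2° apart). A tiny numpy illustration of that trick:

```python
import numpy as np

a0, a1 = np.deg2rad(179.0), np.deg2rad(-179.0)     # only 2 degrees apart across the wrap
t = np.linspace(0.0, 1.0, 5)

naive = (1 - t) * a0 + t * a1                      # sweeps through 0 deg: the long way around
cos_i = (1 - t) * np.cos(a0) + t * np.cos(a1)
sin_i = (1 - t) * np.sin(a0) + t * np.sin(a1)
wrapped = np.arctan2(sin_i, cos_i)                 # stays near +/-180 deg

print(np.rad2deg(naive).round(1))                  # roughly [179, 89.5, 0, -89.5, -179]
print(np.rad2deg(wrapped).round(1))                # roughly [179, 179.5, 180, -179.5, -179]
```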
traj_type[[1, indices_of_interest[1]]] = traj_type[[indices_of_interest[1], 1]] - to_predict[[1, indices_of_interest[1]]] = to_predict[[indices_of_interest[1], 1]] - - id_to_idx[id_0] = id_to_idx[ids_of_interest[0]] - id_to_idx[ids_of_interest[0]] = 0 - id_to_idx[id_1] = id_to_idx[ids_of_interest[1]] - id_to_idx[ids_of_interest[1]] = 1 - - assert (traj[..., :2][mask_traj] != 0).any(-1).all() - - # ego_current_state = scenario.tracks[scenario.sdc_track_index].states[scenario.current_time_index] - # angle = ego_current_state.heading - traj = fill_gaps(traj, mask_traj) - pos = traj[0, scenario.current_time_index, :2] - angle = traj[0, scenario.current_time_index, 2] - # mask_agent_of_interest = np.zeros((traj.shape[0]), dtype=bool) - # idx_of_interest = [id_to_idx[id] for id in scenario.objects_of_interest] - # mask_agent_of_interest[idx_of_interest] = True - - traj, mask_traj, traj_type, to_predict, id_to_idx = filter_tracks( - pos, - traj, - mask_traj, - traj_type, - to_predict, - id_to_idx, - mask_keep=to_predict, - max_moving_distance=50, - max_static_distance=30, - ) - - assert (traj[..., :2][mask_traj] != 0).any(-1).all() - if traj.shape[0] > 100: - print(traj.shape[0]) - - map, mask_map, map_type, map_id_to_idx = group_lanes( - scenario.map_features, pos, max_lane_len=50, max_lane_distance=50 - ) - - lane_states = group_light_signals( - scenario.dynamic_map_states, map_id_to_idx, map.shape[0] - ) - - traj, map = normalize_all(traj, map, pos, -angle) - assert ( - ( - traj[0, scenario.current_time_index + 1 :, :2][ - mask_traj[0, scenario.current_time_index + 1 :] - ] - != 0 - ) - .any(-1) - .all() - ) - assert ( - ( - traj[0, : scenario.current_time_index, :2][ - mask_traj[0, : scenario.current_time_index] - ] - != 0 - ) - .any(-1) - .all() - ) - assert (traj[1:, :, :2][mask_traj[1:, :]] != 0).any(-1).all() - - len_pred = traj.shape[1] - scenario.current_time_index - 1 - - traj = traj.transpose((1, 0, 2)) - mask_traj = mask_traj.transpose((1, 0)) - map = map.transpose((1, 0, 2)) - mask_map = mask_map.transpose((1, 0)) - assert ( - ( - traj[scenario.current_time_index + 1 :, 0, :2][ - mask_traj[scenario.current_time_index + 1 :, 0] - ] - != 0 - ) - .any(-1) - .all() - ) - assert ( - ( - traj[: scenario.current_time_index, 0, :2][ - mask_traj[: scenario.current_time_index, 0] - ] - != 0 - ) - .any(-1) - .all() - ) - assert (traj[:, 1:, :2][mask_traj[:, 1:]] != 0).any(-1).all() - - # Mask futures for trajectories that are not to be predicted - traj = traj * mask_traj[:, :, None] - - # to_predict[0] = True - # to_predict[1] = True - # mask_traj[scenario.current_time_index+1:, np.logical_not(to_predict)] = 0 - mask_to_predict = mask_traj.copy() - mask_to_predict[:, np.logical_not(to_predict)] = False - assert ( - ( - traj[scenario.current_time_index + 1 :, 0, :2][ - mask_to_predict[scenario.current_time_index + 1 :, 0] - ] - != 0 - ) - .any(-1) - .all() - ) - assert ( - ( - traj[: scenario.current_time_index, 0, :2][ - mask_to_predict[: scenario.current_time_index, 0] - ] - != 0 - ) - .any(-1) - .all() - ) - assert (traj[:, 1:, :2][mask_to_predict[:, 1:]] != 0).any(-1).all() - - return { - "traj": traj, - "mask_traj": mask_traj, - "mask_to_predict": mask_to_predict, - "lanes": map, - "lane_states": lane_states, - "mask_lanes": mask_map, - "len_pred": len_pred, - "mean_pos": pos, - } - - -def preprocess_scenario(data, output_dir): - scenario = scenario_pb2.Scenario() - scenario.ParseFromString(data.numpy()) - scenario_id = scenario.scenario_id - scenario = group_scenario(scenario) - if 
scenario is not None: - with open(os.path.join(output_dir, scenario_id), "wb") as handle: - pickle.dump(scenario, handle) - - -def preprocess_scenarios(scenario_dir, output_dir, debug_size=None, num_parallel=8): - """Preprocesses waymo motion data in scenario file format. - - Args: - scenario_dir: Directory containing scenario files. - output_dir: Directory in which to output preprocessed samples - debug_size: If provided, limit to this number of output samples. - This is the _max_ number of samples, but fewer may result. - num_parallel: Number of processes to run in parallel. - Recommend to set this to number of cores - 1. - """ - assert os.path.exists(scenario_dir) - filenames = os.listdir(scenario_dir) - print(f"Saving files in {output_dir}") - filepaths = [os.path.join(scenario_dir, f) for f in filenames] - dataset = tf.data.TFRecordDataset(filepaths) - os.makedirs(output_dir, exist_ok=True) - - pool = ProcessPoolExecutor(num_parallel) - futures = [] - for i, data in enumerate(tqdm(dataset)): - future = pool.submit(preprocess_scenario, data=data, output_dir=output_dir) - # future = preprocess_scenario(data=data, output_dir=output_dir) - futures.append(future) - if debug_size is not None and i >= debug_size: - break - concurrent.futures.wait(futures) - pool.shutdown() - - -if __name__ == "__main__": - """ - The way this works is it provides a command line interface to the function - where you just pass whatever arguments the function takes to the script. - - You can get a help message with: - - $ python scripts/interaction_utils/generate_dataset_waymo.py -h - - An example you might call with: - - $ python scripts/interaction_utils/generate_dataset_waymo.py \ - /path/to/scenarios/training/ /path/to/output/training --debug_size=1000 --num_parallel=48 - """ - fire.Fire(preprocess_scenarios) diff --git a/spaces/TabPFN/TabPFNPrediction/TabPFN/encoders.py b/spaces/TabPFN/TabPFNPrediction/TabPFN/encoders.py deleted file mode 100644 index 00ffc84a0703aa0951f75ab35b01ca394489c90e..0000000000000000000000000000000000000000 --- a/spaces/TabPFN/TabPFNPrediction/TabPFN/encoders.py +++ /dev/null @@ -1,243 +0,0 @@ -import math - -import torch -import torch.nn as nn -from utils import normalize_data -import torch.nn.functional as F -from torch.nn import TransformerEncoder, TransformerEncoderLayer - - -class StyleEncoder(nn.Module): - def __init__(self, num_hyperparameters, em_size): - super().__init__() - self.em_size = em_size - self.embedding = nn.Linear(num_hyperparameters, self.em_size) - - def forward(self, hyperparameters): # B x num_hps - return self.embedding(hyperparameters) - - -class StyleEmbEncoder(nn.Module): - def __init__(self, num_hyperparameters, em_size, num_embeddings=100): - super().__init__() - assert num_hyperparameters == 1 - self.em_size = em_size - self.embedding = nn.Embedding(num_embeddings, self.em_size) - - def forward(self, hyperparameters): # B x num_hps - return self.embedding(hyperparameters.squeeze(1)) - - -class _PositionalEncoding(nn.Module): - def __init__(self, d_model, dropout=0.): - super().__init__() - self.dropout = nn.Dropout(p=dropout) - self.d_model = d_model - self.device_test_tensor = nn.Parameter(torch.tensor(1.)) - - def forward(self, x):# T x B x num_features - assert self.d_model % x.shape[-1]*2 == 0 - d_per_feature = self.d_model // x.shape[-1] - pe = torch.zeros(*x.shape, d_per_feature, device=self.device_test_tensor.device) - #position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) - interval_size = 10 - div_term = (1./interval_size) * 
2*math.pi*torch.exp(torch.arange(0, d_per_feature, 2, device=self.device_test_tensor.device).float()*math.log(math.sqrt(2))) - #print(div_term/2/math.pi) - pe[..., 0::2] = torch.sin(x.unsqueeze(-1) * div_term) - pe[..., 1::2] = torch.cos(x.unsqueeze(-1) * div_term) - return self.dropout(pe).view(x.shape[0],x.shape[1],self.d_model) - - -Positional = lambda _, emsize: _PositionalEncoding(d_model=emsize) - -class EmbeddingEncoder(nn.Module): - def __init__(self, num_features, em_size, num_embs=100): - super().__init__() - self.num_embs = num_embs - self.embeddings = nn.Embedding(num_embs * num_features, em_size, max_norm=True) - self.init_weights(.1) - self.min_max = (-2,+2) - - @property - def width(self): - return self.min_max[1] - self.min_max[0] - - def init_weights(self, initrange): - self.embeddings.weight.data.uniform_(-initrange, initrange) - - def discretize(self, x): - split_size = self.width / self.num_embs - return (x - self.min_max[0] // split_size).int().clamp(0, self.num_embs - 1) - - def forward(self, x): # T x B x num_features - x_idxs = self.discretize(x) - x_idxs += torch.arange(x.shape[-1], device=x.device).view(1, 1, -1) * self.num_embs - # print(x_idxs,self.embeddings.weight.shape) - return self.embeddings(x_idxs).mean(-2) - - -class Normalize(nn.Module): - def __init__(self, mean, std): - super().__init__() - self.mean = mean - self.std = std - - def forward(self, x): - return (x-self.mean)/self.std - - -def get_normalized_uniform_encoder(encoder_creator): - """ - This can be used to wrap an encoder that is fed uniform samples in [0,1] and normalizes these to 0 mean and 1 std. - For example, it can be used as `encoder_creator = get_normalized_uniform_encoder(encoders.Linear)`, now this can - be initialized with `encoder_creator(feature_dim, in_dim)`. - :param encoder: - :return: - """ - return lambda in_dim, out_dim: nn.Sequential(Normalize(.5, math.sqrt(1/12)), encoder_creator(in_dim, out_dim)) - - -def get_normalized_encoder(encoder_creator, data_std): - return lambda in_dim, out_dim: nn.Sequential(Normalize(0., data_std), encoder_creator(in_dim, out_dim)) - - -class ZNormalize(nn.Module): - def forward(self, x): - return (x-x.mean(-1,keepdim=True))/x.std(-1,keepdim=True) - - -class AppendEmbeddingEncoder(nn.Module): - def __init__(self, base_encoder, num_features, emsize): - super().__init__() - self.num_features = num_features - self.base_encoder = base_encoder - self.emb = nn.Parameter(torch.zeros(emsize)) - - def forward(self, x): - if (x[-1] == 1.).all(): - append_embedding = True - else: - assert (x[-1] == 0.).all(), "You need to specify as last position whether to append embedding. " \ - "If you don't want this behavior, please use the wrapped encoder instead." 
- append_embedding = False - x = x[:-1] - encoded_x = self.base_encoder(x) - if append_embedding: - encoded_x = torch.cat([encoded_x, self.emb[None, None, :].repeat(1, encoded_x.shape[1], 1)], 0) - return encoded_x - -def get_append_embedding_encoder(encoder_creator): - return lambda num_features, emsize: AppendEmbeddingEncoder(encoder_creator(num_features, emsize), num_features, emsize) - - -class VariableNumFeaturesEncoder(nn.Module): - def __init__(self, base_encoder, num_features): - super().__init__() - self.base_encoder = base_encoder - self.num_features = num_features - - def forward(self, x): - x = x * (self.num_features/x.shape[-1]) - x = torch.cat((x, torch.zeros(*x.shape[:-1], self.num_features - x.shape[-1], device=x.device)), -1) - return self.base_encoder(x) - - -def get_variable_num_features_encoder(encoder_creator): - return lambda num_features, emsize: VariableNumFeaturesEncoder(encoder_creator(num_features, emsize), num_features) - -class NoMeanEncoder(nn.Module): - """ - This can be useful for any prior that is translation invariant in x or y. - A standard GP for example is translation invariant in x. - That is, GP(x_test+const,x_train+const,y_train) = GP(x_test,x_train,y_train). - """ - def __init__(self, base_encoder): - super().__init__() - self.base_encoder = base_encoder - - def forward(self, x): - return self.base_encoder(x - x.mean(0, keepdim=True)) - - -def get_no_mean_encoder(encoder_creator): - return lambda num_features, emsize: NoMeanEncoder(encoder_creator(num_features, emsize)) - -Linear = nn.Linear -MLP = lambda num_features, emsize: nn.Sequential(nn.Linear(num_features+1,emsize*2), - nn.ReLU(), - nn.Linear(emsize*2,emsize)) - -class NanHandlingEncoder(nn.Module): - def __init__(self, num_features, emsize, keep_nans=True): - super().__init__() - self.num_features = 2 * num_features if keep_nans else num_features - self.emsize = emsize - self.keep_nans = keep_nans - self.layer = nn.Linear(self.num_features, self.emsize) - - def forward(self, x): - if self.keep_nans: - x = torch.cat([torch.nan_to_num(x, nan=0.0), normalize_data(torch.isnan(x) * -1 - + torch.logical_and(torch.isinf(x), torch.sign(x) == 1) * 1 - + torch.logical_and(torch.isinf(x), torch.sign(x) == -1) * 2 - )], -1) - else: - x = torch.nan_to_num(x, nan=0.0) - return self.layer(x) - - -class Linear(nn.Linear): - def __init__(self, num_features, emsize, replace_nan_by_zero=False): - super().__init__(num_features, emsize) - self.num_features = num_features - self.emsize = emsize - self.replace_nan_by_zero = replace_nan_by_zero - - def forward(self, x): - if self.replace_nan_by_zero: - x = torch.nan_to_num(x, nan=0.0) - return super().forward(x) - - def __setstate__(self, state): - super().__setstate__(state) - self.__dict__.setdefault('replace_nan_by_zero', True) - - -class Conv(nn.Module): - def __init__(self, input_size, emsize): - super().__init__() - self.convs = torch.nn.ModuleList([nn.Conv2d(64 if i else 1, 64, 3) for i in range(5)]) - self.linear = nn.Linear(64,emsize) - - def forward(self, x): - size = math.isqrt(x.shape[-1]) - assert size*size == x.shape[-1] - x = x.reshape(*x.shape[:-1], 1, size, size) - for conv in self.convs: - if x.shape[-1] < 4: - break - x = conv(x) - x.relu_() - x = nn.AdaptiveAvgPool2d((1,1))(x).squeeze(-1).squeeze(-1) - return self.linear(x) - - -class CanEmb(nn.Embedding): - def __init__(self, num_features, num_embeddings: int, embedding_dim: int, *args, **kwargs): - assert embedding_dim % num_features == 0 - embedding_dim = embedding_dim // num_features - 
super().__init__(num_embeddings, embedding_dim, *args, **kwargs) - - def forward(self, x): - lx = x.long() - assert (lx == x).all(), "CanEmb only works with tensors of whole numbers" - x = super().forward(lx) - return x.view(*x.shape[:-2], -1) - - -def get_Canonical(num_classes): - return lambda num_features, emsize: CanEmb(num_features, num_classes, emsize) - - -def get_Embedding(num_embs_per_feature=100): - return lambda num_features, emsize: EmbeddingEncoder(num_features, emsize, num_embs=num_embs_per_feature) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/columns.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/columns.py deleted file mode 100644 index 669a3a7074f9a9e1af29cb4bc78b05851df67959..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/columns.py +++ /dev/null @@ -1,187 +0,0 @@ -from collections import defaultdict -from itertools import chain -from operator import itemgetter -from typing import Dict, Iterable, List, Optional, Tuple - -from .align import Align, AlignMethod -from .console import Console, ConsoleOptions, RenderableType, RenderResult -from .constrain import Constrain -from .measure import Measurement -from .padding import Padding, PaddingDimensions -from .table import Table -from .text import TextType -from .jupyter import JupyterMixin - - -class Columns(JupyterMixin): - """Display renderables in neat columns. - - Args: - renderables (Iterable[RenderableType]): Any number of Rich renderables (including str). - width (int, optional): The desired width of the columns, or None to auto detect. Defaults to None. - padding (PaddingDimensions, optional): Optional padding around cells. Defaults to (0, 1). - expand (bool, optional): Expand columns to full width. Defaults to False. - equal (bool, optional): Arrange in to equal sized columns. Defaults to False. - column_first (bool, optional): Align items from top to bottom (rather than left to right). Defaults to False. - right_to_left (bool, optional): Start column from right hand side. Defaults to False. - align (str, optional): Align value ("left", "right", or "center") or None for default. Defaults to None. - title (TextType, optional): Optional title for Columns. - """ - - def __init__( - self, - renderables: Optional[Iterable[RenderableType]] = None, - padding: PaddingDimensions = (0, 1), - *, - width: Optional[int] = None, - expand: bool = False, - equal: bool = False, - column_first: bool = False, - right_to_left: bool = False, - align: Optional[AlignMethod] = None, - title: Optional[TextType] = None, - ) -> None: - self.renderables = list(renderables or []) - self.width = width - self.padding = padding - self.expand = expand - self.equal = equal - self.column_first = column_first - self.right_to_left = right_to_left - self.align: Optional[AlignMethod] = align - self.title = title - - def add_renderable(self, renderable: RenderableType) -> None: - """Add a renderable to the columns. - - Args: - renderable (RenderableType): Any renderable object. 
- """ - self.renderables.append(renderable) - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - render_str = console.render_str - renderables = [ - render_str(renderable) if isinstance(renderable, str) else renderable - for renderable in self.renderables - ] - if not renderables: - return - _top, right, _bottom, left = Padding.unpack(self.padding) - width_padding = max(left, right) - max_width = options.max_width - widths: Dict[int, int] = defaultdict(int) - column_count = len(renderables) - - get_measurement = Measurement.get - renderable_widths = [ - get_measurement(console, options, renderable).maximum - for renderable in renderables - ] - if self.equal: - renderable_widths = [max(renderable_widths)] * len(renderable_widths) - - def iter_renderables( - column_count: int, - ) -> Iterable[Tuple[int, Optional[RenderableType]]]: - item_count = len(renderables) - if self.column_first: - width_renderables = list(zip(renderable_widths, renderables)) - - column_lengths: List[int] = [item_count // column_count] * column_count - for col_no in range(item_count % column_count): - column_lengths[col_no] += 1 - - row_count = (item_count + column_count - 1) // column_count - cells = [[-1] * column_count for _ in range(row_count)] - row = col = 0 - for index in range(item_count): - cells[row][col] = index - column_lengths[col] -= 1 - if column_lengths[col]: - row += 1 - else: - col += 1 - row = 0 - for index in chain.from_iterable(cells): - if index == -1: - break - yield width_renderables[index] - else: - yield from zip(renderable_widths, renderables) - # Pad odd elements with spaces - if item_count % column_count: - for _ in range(column_count - (item_count % column_count)): - yield 0, None - - table = Table.grid(padding=self.padding, collapse_padding=True, pad_edge=False) - table.expand = self.expand - table.title = self.title - - if self.width is not None: - column_count = (max_width) // (self.width + width_padding) - for _ in range(column_count): - table.add_column(width=self.width) - else: - while column_count > 1: - widths.clear() - column_no = 0 - for renderable_width, _ in iter_renderables(column_count): - widths[column_no] = max(widths[column_no], renderable_width) - total_width = sum(widths.values()) + width_padding * ( - len(widths) - 1 - ) - if total_width > max_width: - column_count = len(widths) - 1 - break - else: - column_no = (column_no + 1) % column_count - else: - break - - get_renderable = itemgetter(1) - _renderables = [ - get_renderable(_renderable) - for _renderable in iter_renderables(column_count) - ] - if self.equal: - _renderables = [ - None - if renderable is None - else Constrain(renderable, renderable_widths[0]) - for renderable in _renderables - ] - if self.align: - align = self.align - _Align = Align - _renderables = [ - None if renderable is None else _Align(renderable, align) - for renderable in _renderables - ] - - right_to_left = self.right_to_left - add_row = table.add_row - for start in range(0, len(_renderables), column_count): - row = _renderables[start : start + column_count] - if right_to_left: - row = row[::-1] - add_row(*row) - yield table - - -if __name__ == "__main__": # pragma: no cover - import os - - console = Console() - - files = [f"{i} {s}" for i, s in enumerate(sorted(os.listdir()))] - columns = Columns(files, padding=(0, 1), expand=False, equal=False) - console.print(columns) - console.rule() - columns.column_first = True - console.print(columns) - columns.right_to_left = True - console.rule() - 
console.print(columns) diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_transform.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_transform.py deleted file mode 100644 index 654d65d97d90a66e45b414bb878b13ba9f64e70a..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_transform.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# Modified by Xingyi Zhou -# File: transform.py - -import numpy as np -import torch -import torch.nn.functional as F -from fvcore.transforms.transform import ( - CropTransform, - HFlipTransform, - NoOpTransform, - Transform, - TransformList, -) -from PIL import Image - -try: - import cv2 # noqa -except ImportError: - # OpenCV is an optional dependency at the moment - pass - -__all__ = [ - "EfficientDetResizeCropTransform", -] - - -class EfficientDetResizeCropTransform(Transform): - """ - """ - - def __init__(self, scaled_h, scaled_w, offset_y, offset_x, img_scale, target_size, interp=None): - """ - Args: - h, w (int): original image size - new_h, new_w (int): new image size - interp: PIL interpolation methods, defaults to bilinear. - """ - # TODO decide on PIL vs opencv - super().__init__() - if interp is None: - interp = Image.BILINEAR - self._set_attributes(locals()) - - def apply_image(self, img, interp=None): - # assert img.shape[:2] == (self.h, self.w) - assert len(img.shape) <= 4 - - if img.dtype == np.uint8: - pil_image = Image.fromarray(img) - interp_method = interp if interp is not None else self.interp - pil_image = pil_image.resize((self.scaled_w, self.scaled_h), interp_method) - ret = np.asarray(pil_image) - right = min(self.scaled_w, self.offset_x + self.target_size[1]) - lower = min(self.scaled_h, self.offset_y + self.target_size[0]) - # img = img.crop((self.offset_x, self.offset_y, right, lower)) - if len(ret.shape) <= 3: - ret = ret[self.offset_y: lower, self.offset_x: right] - else: - ret = ret[..., self.offset_y: lower, self.offset_x: right, :] - else: - # PIL only supports uint8 - img = torch.from_numpy(img) - shape = list(img.shape) - shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:] - img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw - _PIL_RESIZE_TO_INTERPOLATE_MODE = {Image.BILINEAR: "bilinear", Image.BICUBIC: "bicubic"} - mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp] - img = F.interpolate(img, (self.scaled_h, self.scaled_w), mode=mode, align_corners=False) - shape[:2] = (self.scaled_h, self.scaled_w) - ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c) - right = min(self.scaled_w, self.offset_x + self.target_size[1]) - lower = min(self.scaled_h, self.offset_y + self.target_size[0]) - if len(ret.shape) <= 3: - ret = ret[self.offset_y: lower, self.offset_x: right] - else: - ret = ret[..., self.offset_y: lower, self.offset_x: right, :] - return ret - - def apply_coords(self, coords): - coords[:, 0] = coords[:, 0] * self.img_scale - coords[:, 1] = coords[:, 1] * self.img_scale - coords[:, 0] -= self.offset_x - coords[:, 1] -= self.offset_y - return coords - - def apply_segmentation(self, segmentation): - segmentation = self.apply_image(segmentation, interp=Image.NEAREST) - return segmentation - - def inverse(self): - raise NotImplementedError - # return 
ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp) \ No newline at end of file diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py deleted file mode 100644 index aac56c07da2be4e181e3e95de8cee1fc2858286d..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import copy -import numpy as np -import os -import unittest -import pycocotools.mask as mask_util - -from detectron2.data import MetadataCatalog, detection_utils -from detectron2.data import transforms as T -from detectron2.structures import BitMasks, BoxMode -from detectron2.utils.file_io import PathManager - - -class TestTransformAnnotations(unittest.TestCase): - def test_transform_simple_annotation(self): - transforms = T.TransformList([T.HFlipTransform(400)]) - anno = { - "bbox": np.asarray([10, 10, 200, 300]), - "bbox_mode": BoxMode.XYXY_ABS, - "category_id": 3, - "segmentation": [[10, 10, 100, 100, 100, 10], [150, 150, 200, 150, 200, 200]], - } - - output = detection_utils.transform_instance_annotations(anno, transforms, (400, 400)) - self.assertTrue(np.allclose(output["bbox"], [200, 10, 390, 300])) - self.assertEqual(len(output["segmentation"]), len(anno["segmentation"])) - self.assertTrue(np.allclose(output["segmentation"][0], [390, 10, 300, 100, 300, 10])) - - detection_utils.annotations_to_instances([output, output], (400, 400)) - - def test_transform_empty_annotation(self): - detection_utils.annotations_to_instances([], (400, 400)) - - def test_flip_keypoints(self): - transforms = T.TransformList([T.HFlipTransform(400)]) - anno = { - "bbox": np.asarray([10, 10, 200, 300]), - "bbox_mode": BoxMode.XYXY_ABS, - "keypoints": np.random.rand(17, 3) * 50 + 15, - } - - output = detection_utils.transform_instance_annotations( - copy.deepcopy(anno), - transforms, - (400, 400), - keypoint_hflip_indices=detection_utils.create_keypoint_hflip_indices( - ["keypoints_coco_2017_train"] - ), - ) - # The first keypoint is nose - self.assertTrue(np.allclose(output["keypoints"][0, 0], 400 - anno["keypoints"][0, 0])) - # The last 16 keypoints are 8 left-right pairs - self.assertTrue( - np.allclose( - output["keypoints"][1:, 0].reshape(-1, 2)[:, ::-1], - 400 - anno["keypoints"][1:, 0].reshape(-1, 2), - ) - ) - self.assertTrue( - np.allclose( - output["keypoints"][1:, 1:].reshape(-1, 2, 2)[:, ::-1, :], - anno["keypoints"][1:, 1:].reshape(-1, 2, 2), - ) - ) - - def test_crop(self): - transforms = T.TransformList([T.CropTransform(300, 300, 10, 10)]) - keypoints = np.random.rand(17, 3) * 50 + 15 - keypoints[:, 2] = 2 - anno = { - "bbox": np.asarray([10, 10, 200, 400]), - "bbox_mode": BoxMode.XYXY_ABS, - "keypoints": keypoints, - } - - output = detection_utils.transform_instance_annotations( - copy.deepcopy(anno), transforms, (10, 10) - ) - # box is shifted and cropped - self.assertTrue((output["bbox"] == np.asarray([0, 0, 0, 10])).all()) - # keypoints are no longer visible - self.assertTrue((output["keypoints"][:, 2] == 0).all()) - - def test_transform_RLE(self): - transforms = T.TransformList([T.HFlipTransform(400)]) - mask = np.zeros((300, 400), order="F").astype("uint8") - mask[:, :200] = 1 - - anno = { - "bbox": np.asarray([10, 10, 200, 300]), - "bbox_mode": BoxMode.XYXY_ABS, - "segmentation": 
mask_util.encode(mask[:, :, None])[0], - "category_id": 3, - } - output = detection_utils.transform_instance_annotations( - copy.deepcopy(anno), transforms, (300, 400) - ) - mask = output["segmentation"] - self.assertTrue((mask[:, 200:] == 1).all()) - self.assertTrue((mask[:, :200] == 0).all()) - - inst = detection_utils.annotations_to_instances( - [output, output], (400, 400), mask_format="bitmask" - ) - self.assertTrue(isinstance(inst.gt_masks, BitMasks)) - - def test_transform_RLE_resize(self): - transforms = T.TransformList( - [T.HFlipTransform(400), T.ScaleTransform(300, 400, 400, 400, "bilinear")] - ) - mask = np.zeros((300, 400), order="F").astype("uint8") - mask[:, :200] = 1 - - anno = { - "bbox": np.asarray([10, 10, 200, 300]), - "bbox_mode": BoxMode.XYXY_ABS, - "segmentation": mask_util.encode(mask[:, :, None])[0], - "category_id": 3, - } - output = detection_utils.transform_instance_annotations( - copy.deepcopy(anno), transforms, (400, 400) - ) - - inst = detection_utils.annotations_to_instances( - [output, output], (400, 400), mask_format="bitmask" - ) - self.assertTrue(isinstance(inst.gt_masks, BitMasks)) - - def test_gen_crop(self): - instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS} - t = detection_utils.gen_crop_transform_with_instance((10, 10), (150, 150), instance) - # the box center must fall into the cropped region - self.assertTrue(t.x0 <= 55 <= t.x0 + t.w) - - def test_gen_crop_outside_boxes(self): - instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS} - with self.assertRaises(AssertionError): - detection_utils.gen_crop_transform_with_instance((10, 10), (15, 15), instance) - - def test_read_sem_seg(self): - cityscapes_dir = MetadataCatalog.get("cityscapes_fine_sem_seg_val").gt_dir - sem_seg_gt_path = os.path.join( - cityscapes_dir, "frankfurt", "frankfurt_000001_083852_gtFine_labelIds.png" - ) - if not PathManager.exists(sem_seg_gt_path): - raise unittest.SkipTest( - "Semantic segmentation ground truth {} not found.".format(sem_seg_gt_path) - ) - sem_seg = detection_utils.read_image(sem_seg_gt_path, "L") - self.assertEqual(sem_seg.ndim, 3) - self.assertEqual(sem_seg.shape[2], 1) - self.assertEqual(sem_seg.dtype, np.uint8) - self.assertEqual(sem_seg.max(), 32) - self.assertEqual(sem_seg.min(), 1) - - def test_read_exif_orientation(self): - # https://github.com/recurser/exif-orientation-examples/raw/master/Landscape_5.jpg - URL = "detectron2://assets/Landscape_5.jpg" - img = detection_utils.read_image(URL, "RGB") - self.assertEqual(img.ndim, 3) - self.assertEqual(img.dtype, np.uint8) - self.assertEqual(img.shape, (1200, 1800, 3)) # check that shape is not transposed - - def test_opencv_exif_orientation(self): - import cv2 - - URL = "detectron2://assets/Landscape_5.jpg" - with PathManager.open(URL, "rb") as f: - img = cv2.imdecode(np.frombuffer(f.read(), dtype="uint8"), cv2.IMREAD_COLOR) - self.assertEqual(img.dtype, np.uint8) - self.assertEqual(img.shape, (1200, 1800, 3)) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Th3BossC/TranscriptApi/TranscriptApi/models.py b/spaces/Th3BossC/TranscriptApi/TranscriptApi/models.py deleted file mode 100644 index b22289025f58cc9d9497db1d7752ef2382abb8ac..0000000000000000000000000000000000000000 --- a/spaces/Th3BossC/TranscriptApi/TranscriptApi/models.py +++ /dev/null @@ -1,24 +0,0 @@ -from TranscriptApi import db -from datetime import datetime - -class VideoSummary(db.Model): - id = db.Column(db.Integer, primary_key = True) - date = db.Column(db.DateTime(), nullable = 
False, default = datetime.utcnow) - video_id = db.Column(db.String(10), unique = True, nullable = False) - title = db.Column(db.String(100), nullable = False) - transcript = db.Column(db.Text(), nullable = False) - summary = db.Column(db.Text(), nullable = False) - - def __repr__(self): - return f'VideoSummary({self.id}, {self.video_id}, {self.title})' - - -class FileSummary(db.Model): - id = db.Column(db.Integer, primary_key = True) - date = db.Column(db.DateTime(), nullable = False, default = datetime.utcnow) - title = db.Column(db.String(100), nullable = False) - transcript = db.Column(db.Text(), nullable = False) - summary = db.Column(db.Text(), nullable = False) - - def __repr__(self): - return f"FileSummary({self.id}, {self.title})" \ No newline at end of file diff --git a/spaces/Thafx/sdrv40/app.py b/spaces/Thafx/sdrv40/app.py deleted file mode 100644 index e34c0fa567d7ddf0d49c83fd07016fb15f358902..0000000000000000000000000000000000000000 --- a/spaces/Thafx/sdrv40/app.py +++ /dev/null @@ -1,189 +0,0 @@ -from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler -import gradio as gr -import torch -import argparse -from PIL import Image - -model_id = 'SG161222/Realistic_Vision_V4.0_noVAE' -prefix = 'RAW photo,' - -scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") - -pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe_i2i = pipe_i2i.to("cuda") - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - - -def _parse_args(prompt, generator): - parser = argparse.ArgumentParser( - description="making it work."
- ) - parser.add_argument( - "--no-half-vae", help="no half vae" - ) - - cmdline_args = parser.parse_args() - command = cmdline_args.command - conf_file = cmdline_args.conf_file - conf_args = Arguments(conf_file) - opt = conf_args.readArguments() - - if cmdline_args.config_overrides: - for config_override in cmdline_args.config_overrides.split(";"): - config_override = config_override.strip() - if config_override: - var_val = config_override.split("=") - assert ( - len(var_val) == 2 - ), f"Config override '{var_val}' does not have the form 'VAR=val'" - conf_args.add_opt(opt, var_val[0], var_val[1], force_override=True) - -def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False): - generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - prompt = f"{prefix} {prompt}" if auto_prefix else prompt - - try: - if img is not None: - return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None - else: - return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None - except Exception as e: - return None, error_str(e) - - - -def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator): - - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator): - - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe_i2i( - prompt, - negative_prompt = neg_prompt, - init_image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - - def fake_safety_checker(images, **kwargs): - return result.images[0], [False] * len(images) - - pipe.safety_checker = fake_safety_checker - -css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -""" -with gr.Blocks(css=css) as demo: - gr.HTML( - f""" -
      -
      -

      📷 Realistic Vision V4.0 📸

      -
      -

      - Demo for Realistic Vision V4.0 - Stable Diffusion model by Eugene. {"" if prefix else ""} - Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU ⚡"}. -

      -

Please use the prompt template below to get the desired generation results: -

      - -Prompt: -
      -* subject *, (high detailed skin:1.2), 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3 -
      -
      - -Example: a close up portrait photo of 26 y.o woman in wastelander clothes, long haircut, pale skin, slim body, background is city ruins,
      -(high detailed skin:1.2), 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3 -
      -
      - -
      -Negative Prompt: -
      -(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality,
      -low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry,
      -dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms,
      -extra legs, fused fingers, too many fingers, long neck -
      - -
      -Have Fun & Enjoy ⚡ //THAFX -
      - -
      - """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False,max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - image_out = gr.Image(height=512) - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically (RAW photo,)", value=prefix, visible=prefix) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=5, maximum=15) - steps = gr.Slider(label="Steps", value=20, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False) - - inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - - -demo.queue(concurrency_count=1) -demo.launch() diff --git a/spaces/ThirdEyeData/Semantic-Search/app.py b/spaces/ThirdEyeData/Semantic-Search/app.py deleted file mode 100644 index 76d8516359683b93a0dfe10b5b383a29678cb79a..0000000000000000000000000000000000000000 --- a/spaces/ThirdEyeData/Semantic-Search/app.py +++ /dev/null @@ -1,77 +0,0 @@ -import pandas as pd -import tiktoken -import os -import openai -from openai.embeddings_utils import get_embedding, cosine_similarity -import numpy as np -import streamlit as st - -input_datapath = "fine_food_reviews_with_embeddings_1k.csv" -df = pd.read_csv(input_datapath, index_col=0) - -#os.environ["OPENAI_API_KEY"] = st.secrets("OPENAI_API_KEY") -#openai.api_key = st.secrets("OPENAI_API_KEY") -st.title("Semantic Search") - - -#adding another column having the summary as title and the actual text as content -df["combined"] = ( - "Title: " + df.Summary.str.strip() + "; Content: " + df.Text.str.strip() -) - - -# embedding model parameters -embedding_model = "text-embedding-ada-002" -embedding_encoding = "cl100k_base" # this the encoding for text-embedding-ada-002 -max_tokens = 8000 # the maximum for text-embedding-ada-002 is 8191 - - -encoding = tiktoken.get_encoding(embedding_encoding) -top_n = 500 -# omit reviews that are too long to embed -df["n_tokens"] = df.combined.apply(lambda x: len(encoding.encode(x))) -df = df[df.n_tokens <= max_tokens].tail(top_n) - - -datafile_path = "fine_food_reviews_with_embeddings_1k.csv" -df = pd.read_csv(datafile_path) -df["embedding"] = df.embedding.apply(eval).apply(np.array) - -# search through the reviews for a specific product -def search_reviews(df, product_description, n=3, pprint=True): - product_embedding = get_embedding( - product_description, - engine="text-embedding-ada-002" - ) - df["similarity"] = 
df.embedding.apply(lambda x: cosine_similarity(x, product_embedding)) - - results = ( - df.sort_values("similarity", ascending=False) - .head(n) - .combined.str.replace("Title: ", "") - .str.replace("; Content:", ": ") - ) - - product = ( - df.sort_values("similarity", ascending=False) - .head(n) - .ProductId - ) - - if pprint: - for r in range(n): - idx = results.index[r] - print("Product : ",product[idx]) - print(results[idx]) - print() - return results,product - - -prompt = st.text_input("What do you want to search for? : ","pizza") - - -top_n = st.number_input("How many results do you want to see? : ", min_value = 1) -results,product = search_reviews(df, prompt, top_n) -if st.button("Search Reviews"): - st.write(product,results) - diff --git a/spaces/Toaster496/HugChatWithPlugin/README.md b/spaces/Toaster496/HugChatWithPlugin/README.md deleted file mode 100644 index 2f58cc2f75de2048748c497374ca9ef3bc2a2464..0000000000000000000000000000000000000000 --- a/spaces/Toaster496/HugChatWithPlugin/README.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: HugChatWithPlugin -emoji: 🔥 -colorFrom: red -colorTo: indigo -sdk: streamlit -sdk_version: 1.5.0 -app_file: app.py -pinned: true -models: -- gpt2 -- distilbert-base-uncased -tags: -- NLP -- Chatbot -hf_oauth: true -hf_oauth_redirect_path: /auth/callback -license: gpl-2.0 ---- - -Check out the configuration reference at [https://huggingface.co/docs/hub/spaces-config-reference](https://huggingface.co/docs/hub/spaces-config-reference) \ No newline at end of file diff --git a/spaces/Trangluna2002/AI_Cover_Gen/src/vc_infer_pipeline.py b/spaces/Trangluna2002/AI_Cover_Gen/src/vc_infer_pipeline.py deleted file mode 100644 index 25f873e1e210879e085afd073306d796bf5114ea..0000000000000000000000000000000000000000 --- a/spaces/Trangluna2002/AI_Cover_Gen/src/vc_infer_pipeline.py +++ /dev/null @@ -1,653 +0,0 @@ -from functools import lru_cache -from time import time as ttime - -import faiss -import librosa -import numpy as np -import os -import parselmouth -import pyworld -import sys -import torch -import torch.nn.functional as F -import torchcrepe -import traceback -from scipy import signal -from torch import Tensor - -BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -now_dir = os.path.join(BASE_DIR, 'src') -sys.path.append(now_dir) - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} - - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # 每半秒一个点 - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class VC(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, 
self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # Hubert input sampling rate - self.window = 160 # samples per frame - self.t_pad = self.sr * self.x_pad # padding time added before and after each segment - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # query window before and after each candidate cut point - self.t_center = self.sr * self.x_center # candidate cut-point positions - self.t_max = self.sr * self.x_max # duration threshold below which no cut-point search is done - self.device = config.device - - # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device) - def get_optimal_torch_device(self, index: int = 0) -> torch.device: - # Get cuda device - if torch.cuda.is_available(): - return torch.device( - f"cuda:{index % torch.cuda.device_count()}" - ) # Very fast - elif torch.backends.mps.is_available(): - return torch.device("mps") - # Insert an else here to grab "xla" devices if available. TO DO later. Requires the torch_xla.core.xla_model library - # Otherwise return the "cpu" as a torch device, - return torch.device("cpu") - - # Fork Feature: Compute f0 with the crepe method - def get_f0_crepe_computation( - self, - x, - f0_min, - f0_max, - p_len, - hop_length=160, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths mean more pitch accuracy but longer inference time. - model="full", # Either use crepe-tiny "tiny" or crepe "full". Default is full - ): - x = x.astype( - np.float32 - ) # fixes the F.conv2D exception. We needed to convert double to float. - x /= np.quantile(np.abs(x), 0.999) - torch_device = self.get_optimal_torch_device() - audio = torch.from_numpy(x).to(torch_device, copy=True) - audio = torch.unsqueeze(audio, dim=0) - if audio.ndim == 2 and audio.shape[0] > 1: - audio = torch.mean(audio, dim=0, keepdim=True).detach() - audio = audio.detach() - print("Initiating prediction with a crepe_hop_length of: " + str(hop_length)) - pitch: Tensor = torchcrepe.predict( - audio, - self.sr, - hop_length, - f0_min, - f0_max, - model, - batch_size=hop_length * 2, - device=torch_device, - pad=True, - ) - p_len = p_len or x.shape[0] // hop_length - # Resize the pitch for final f0 - source = np.array(pitch.squeeze(0).cpu().float().numpy()) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * p_len, len(source)) / p_len, - np.arange(0, len(source)), - source, - ) - f0 = np.nan_to_num(target) - return f0 # Resized f0 - - def get_f0_official_crepe_computation( - self, - x, - f0_min, - f0_max, - model="full", - ): - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - return f0 - - # Fork Feature: Compute pYIN f0 method - def get_f0_pyin_computation(self, x, f0_min, f0_max): - y, sr = librosa.load("saudio/Sidney.wav", self.sr, mono=True) - f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max) - f0 = f0[1:] # Get rid of extra first frame - return f0 - - # Fork Feature: Acquire median hybrid f0 estimation calculation - def get_f0_hybrid_computation( - self, - methods_str, - input_audio_path, - x, - f0_min, - f0_max, - p_len, -
filter_radius, - crepe_hop_length, - time_step, - ): - # Get various f0 methods from input to use in the computation stack - s = methods_str - s = s.split("hybrid")[1] - s = s.replace("[", "").replace("]", "") - methods = s.split("+") - f0_computation_stack = [] - - print("Calculating f0 pitch estimations for methods: %s" % str(methods)) - x = x.astype(np.float32) - x /= np.quantile(np.abs(x), 0.999) - # Get f0 calculations for all methods specified - for method in methods: - f0 = None - if method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif method == "crepe": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max) - f0 = f0[1:] # Get rid of extra first frame - elif method == "crepe-tiny": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, "tiny") - f0 = f0[1:] # Get rid of extra first frame - elif method == "mangio-crepe": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length - ) - elif method == "mangio-crepe-tiny": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length, "tiny" - ) - elif method == "harvest": - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - f0 = f0[1:] # Get rid of first frame. - elif method == "dio": # Potentially buggy? - f0, t = pyworld.dio( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 = f0[1:] - # elif method == "pyin": Not Working just yet - # f0 = self.get_f0_pyin_computation(x, f0_min, f0_max) - # Push method to the stack - f0_computation_stack.append(f0) - - for fc in f0_computation_stack: - print(len(fc)) - - print("Calculating hybrid median f0 from the stack of: %s" % str(methods)) - f0_median_hybrid = None - if len(f0_computation_stack) == 1: - f0_median_hybrid = f0_computation_stack[0] - else: - f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) - return f0_median_hybrid - - def get_f0( - self, - input_audio_path, - x, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - inp_f0=None, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - elif f0_method == "dio": # Potentially Buggy? 
- f0, t = pyworld.dio( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - elif f0_method == "crepe": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max) - elif f0_method == "crepe-tiny": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, "tiny") - elif f0_method == "mangio-crepe": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length - ) - elif f0_method == "mangio-crepe-tiny": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length, "tiny" - ) - elif f0_method == "rmvpe": - if hasattr(self, "model_rmvpe") == False: - from rmvpe import RMVPE - - self.model_rmvpe = RMVPE( - os.path.join(BASE_DIR, 'rvc_models', 'rmvpe.pt'), is_half=self.is_half, device=self.device - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - - elif "hybrid" in f0_method: - # Perform hybrid median pitch estimation - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = self.get_f0_hybrid_computation( - f0_method, - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step, - ) - - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = feats.clone() - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - 
torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch != None and pitchf != None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]) - .data.cpu() - .float() - .numpy() - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy() - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - crepe_hop_length, - f0_file=None, - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - inp_f0, - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if self.device == "mps": - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in 
opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if resample_sr >= 16000 and tgt_sr != resample_sr: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/User1342/WatchTower/Pinpoint/Grapher.py b/spaces/User1342/WatchTower/Pinpoint/Grapher.py deleted file mode 100644 index 638c1e11f8b082a41b7709b0db8d63dd0400099f..0000000000000000000000000000000000000000 --- a/spaces/User1342/WatchTower/Pinpoint/Grapher.py +++ /dev/null @@ -1,60 +0,0 @@ -import networkx as nx - - -class grapher(): - """ - A wrapper class used for generating a graph for interactions between users - """ - graph = None - - def __init__(self): - """ - Constructor. - """ - self.graph = nx.DiGraph() - - def add_edge_wrapper(self, node_1_name, node_2_name, weight, relationship): - """ - A wrapper function used to add an edge connection or node. 
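For context, a minimal sketch of the interaction-graph idea this wrapper builds on; the node names and relation label are illustrative only and not part of the original class:

    import networkx as nx

    g = nx.DiGraph()
    # a directed edge from one user to another, annotated with a weight and a relationship type
    g.add_edge("alice", "bob", weight=2, relation="retweet")
    g.add_node("carol")  # isolated node with no edges yet

    # degree centrality is normalised by the number of other nodes a node could connect to
    print(nx.degree_centrality(g)["alice"])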
- :param node_1_name: from - :param node_2_name: to - :param weight: - :param relationship: - :return: - """ - self.graph.add_edge(node_1_name, node_2_name, weight=weight, relation=relationship) - - def add_node(self, node_name): - """ - A wrapper function that adds a node with no edges to the graph - :param node_name: - """ - self.graph.add_node(node_name) - - def get_info(self): - """ - Retrieves information about the graph - :return: - """ - return nx.info(self.graph) - - def show_graph(self): - """ - Displays the graph - :return: - """ - nx.spring_layout(self.graph) - - def get_degree_centrality_for_user(self, user_name): - """ - Returns the Degree of Centrality for a given user present in the graph - :param user_name: - :return: the Degree of Centrality for a given user present in the graph - """ - centrality = nx.degree_centrality(self.graph) - return centrality[user_name] - - # todo implement - # def get_eigenvector_centrality_for_user(self, user_name): - # centrality = nx.eigenvector_centrality(self.graph) - # return centrality[user_name] diff --git a/spaces/VatsaDev/TinyLlama/README.md b/spaces/VatsaDev/TinyLlama/README.md deleted file mode 100644 index ff054c712afe51a4493d948250b243cf99c902c9..0000000000000000000000000000000000000000 --- a/spaces/VatsaDev/TinyLlama/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: TinyLlama -emoji: 👀 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h b/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h deleted file mode 100644 index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000 --- a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. 
- */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, 
std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) 
std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, 
std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/python/dqn/policies.py b/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/python/dqn/policies.py deleted file mode 100644 index 4ecf39a5fc04b24ad1b809232b186728366987b6..0000000000000000000000000000000000000000 --- a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/python/dqn/policies.py +++ /dev/null @@ -1,237 +0,0 @@ -from typing import Any, Dict, List, Optional, Type - -import gym -import torch as th -from torch import nn - -from stable_baselines3.common.policies import BasePolicy, register_policy -from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp -from stable_baselines3.common.type_aliases import Schedule - - -class QNetwork(BasePolicy): - """ - Action-Value (Q-Value) network for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - features_extractor: nn.Module, - features_dim: int, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - normalize_images: bool = True, - ): - super(QNetwork, self).__init__( - observation_space, - action_space, - features_extractor=features_extractor, - normalize_images=normalize_images, - ) - - if net_arch is None: - net_arch = [64, 64] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.features_extractor = features_extractor - self.features_dim = features_dim - self.normalize_images = normalize_images - action_dim = self.action_space.n # number of actions - q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn) - self.q_net = nn.Sequential(*q_net) - - def forward(self, obs: th.Tensor) -> th.Tensor: - """ - Predict the q-values. - - :param obs: Observation - :return: The estimated Q-Value for each action. - """ - return self.q_net(self.extract_features(obs)) - - def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor: - q_values = self.forward(observation) - # Greedy action - action = q_values.argmax(dim=1).reshape(-1) - return action - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_arch, - features_dim=self.features_dim, - activation_fn=self.activation_fn, - features_extractor=self.features_extractor, - ) - ) - return data - - -class DQNPolicy(BasePolicy): - """ - Policy class with Q-Value Net and target net for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. 
- :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. - :param features_extractor_kwargs: Keyword arguments - to pass to the features extractor. - :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(DQNPolicy, self).__init__( - observation_space, - action_space, - features_extractor_class, - features_extractor_kwargs, - optimizer_class=optimizer_class, - optimizer_kwargs=optimizer_kwargs, - ) - - if net_arch is None: - if features_extractor_class == FlattenExtractor: - net_arch = [64, 64] - else: - net_arch = [] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.normalize_images = normalize_images - - self.net_args = { - "observation_space": self.observation_space, - "action_space": self.action_space, - "net_arch": self.net_arch, - "activation_fn": self.activation_fn, - "normalize_images": normalize_images, - } - - self.q_net, self.q_net_target = None, None - self._build(lr_schedule) - - def _build(self, lr_schedule: Schedule) -> None: - """ - Create the network and the optimizer. - - :param lr_schedule: Learning rate schedule - lr_schedule(1) is the initial learning rate - """ - - self.q_net = self.make_q_net() - self.q_net_target = self.make_q_net() - self.q_net_target.load_state_dict(self.q_net.state_dict()) - - # Setup optimizer with initial learning rate - self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs) - - def make_q_net(self) -> QNetwork: - # Make sure we always have separate networks for features extractors etc - net_args = self._update_features_extractor(self.net_args, features_extractor=None) - return QNetwork(**net_args).to(self.device) - - def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self._predict(obs, deterministic=deterministic) - - def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self.q_net._predict(obs, deterministic=deterministic) - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_args["net_arch"], - activation_fn=self.net_args["activation_fn"], - lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone - optimizer_class=self.optimizer_class, - optimizer_kwargs=self.optimizer_kwargs, - features_extractor_class=self.features_extractor_class, - features_extractor_kwargs=self.features_extractor_kwargs, - ) - ) - return data - - -MlpPolicy = DQNPolicy - - -class CnnPolicy(DQNPolicy): - """ - Policy class for DQN when using images as input. 
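As a rough illustration of the greedy action selection these policies perform (toy Q-values only, nothing here depends on the rest of the class):

    import torch as th

    # one row of Q-values per observation in the batch, one column per discrete action
    q_values = th.tensor([[0.1, 0.7, 0.2, 0.0],
                          [0.9, 0.3, 0.1, 0.5]])

    # the greedy policy simply takes the argmax over actions, as QNetwork._predict does
    actions = q_values.argmax(dim=1).reshape(-1)
    print(actions)  # tensor([1, 0])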
- - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. - :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(CnnPolicy, self).__init__( - observation_space, - action_space, - lr_schedule, - net_arch, - activation_fn, - features_extractor_class, - features_extractor_kwargs, - normalize_images, - optimizer_class, - optimizer_kwargs, - ) - - -register_policy("MlpPolicy", MlpPolicy) -register_policy("CnnPolicy", CnnPolicy) diff --git a/spaces/XzJosh/Eileen-Bert-VITS2/resample.py b/spaces/XzJosh/Eileen-Bert-VITS2/resample.py deleted file mode 100644 index 2ed1685654a371c5722168e9987809b05b1cb224..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Eileen-Bert-VITS2/resample.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count - -import soundfile -from scipy.io import wavfile -from tqdm import tqdm - - -def process(item): - spkdir, wav_name, args = item - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, sr=args.sr) - soundfile.write( - os.path.join(args.out_dir, speaker, wav_name), - wav, - sr - ) - - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--sr", type=int, default=44100, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./raw", help="path to source dir") - parser.add_argument("--out_dir", type=str, default="./dataset", help="path to target dir") - args = parser.parse_args() - # processs = 8 - processs = cpu_count()-2 if cpu_count() >4 else 1 - pool = Pool(processes=processs) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])): - pass diff --git a/spaces/YE01/saya-vits/text/symbols.py b/spaces/YE01/saya-vits/text/symbols.py deleted file mode 100644 index 4cf61b81c693fca1f24540c758c13662afc5455e..0000000000000000000000000000000000000000 --- a/spaces/YE01/saya-vits/text/symbols.py +++ /dev/null @@ -1,75 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. 
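A small sketch of how a symbol table like this is typically consumed downstream; the shortened alphabet is a stand-in for the real character sets defined in this file:

    _pad = '_'
    _punctuation = ',.!?-~…'
    _letters = 'abcde'  # stand-in for the real alphabet

    symbols = [_pad] + list(_punctuation) + list(_letters)
    symbol_to_id = {s: i for i, s in enumerate(symbols)}

    # text is converted to a sequence of integer ids before being fed to the model
    ids = [symbol_to_id[ch] for ch in 'abc!']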
-''' - -'''# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' -''' - -# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' - - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -'''# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' -''' - -'''# sanskrit_cleaners -_pad = '_' -_punctuation = '।' -_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ ' -''' - -'''# cjks_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ ' -''' - -'''# thai_cleaners -_pad = '_' -_punctuation = '.!? ' -_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์' -''' - -'''# cjke_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ ' -''' - -'''# shanghainese_cleaners -_pad = '_' -_punctuation = ',.!?…' -_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 ' -''' - -'''# chinese_dialect_cleaners -_pad = '_' -_punctuation = ',.!?~…─' -_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚ᴀᴇ↑↓∅ⱼ ' -''' - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") diff --git a/spaces/YUANAI/DiffspeechResearch/utils/commons/dataset_utils.py b/spaces/YUANAI/DiffspeechResearch/utils/commons/dataset_utils.py deleted file mode 100644 index 44c2ca0ce3226fa21bf9d7c7fa889b23ef9b0fa9..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/utils/commons/dataset_utils.py +++ /dev/null @@ -1,247 +0,0 @@ -import os -import sys -import traceback -import types -from functools import wraps -from itertools import chain -import numpy as np -import torch.utils.data -from torch.utils.data import ConcatDataset -from utils.commons.hparams import hparams - - -def collate_1d_or_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1): - if len(values[0].shape) == 1: - return collate_1d(values, pad_idx, left_pad, shift_right, max_len, shift_id) - else: - return collate_2d(values, pad_idx, left_pad, shift_right, max_len) - - -def collate_1d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1): - """Convert a list of 1d tensors into a padded 2d tensor.""" - size = max(v.size(0) for v in values) if max_len is None else max_len - res = values[0].new(len(values), size).fill_(pad_idx) - - def copy_tensor(src, dst): - assert dst.numel() == src.numel() - if shift_right: - dst[1:] = src[:-1] - dst[0] = shift_id - else: - dst.copy_(src) - - for i, v in enumerate(values): - copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) - return res - - -def collate_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None): - """Convert a list of 2d tensors into a padded 3d tensor.""" - size = max(v.size(0) for v in values) if max_len is None else max_len - res = values[0].new(len(values), size, values[0].shape[1]).fill_(pad_idx) - - def copy_tensor(src, dst): - assert dst.numel() == src.numel() - if shift_right: - dst[1:] = src[:-1] - else: - dst.copy_(src) - - for i, v in enumerate(values): - copy_tensor(v, 
res[i][size - len(v):] if left_pad else res[i][:len(v)]) - return res - - -def _is_batch_full(batch, num_tokens, max_tokens, max_sentences): - if len(batch) == 0: - return 0 - if len(batch) == max_sentences: - return 1 - if num_tokens > max_tokens: - return 1 - return 0 - - -def batch_by_size( - indices, num_tokens_fn, max_tokens=None, max_sentences=None, - required_batch_size_multiple=1, distributed=False -): - """ - Yield mini-batches of indices bucketed by size. Batches may contain - sequences of different lengths. - - Args: - indices (List[int]): ordered list of dataset indices - num_tokens_fn (callable): function that returns the number of tokens at - a given index - max_tokens (int, optional): max number of tokens in each batch - (default: None). - max_sentences (int, optional): max number of sentences in each - batch (default: None). - required_batch_size_multiple (int, optional): require batch size to - be a multiple of N (default: 1). - """ - max_tokens = max_tokens if max_tokens is not None else sys.maxsize - max_sentences = max_sentences if max_sentences is not None else sys.maxsize - bsz_mult = required_batch_size_multiple - - if isinstance(indices, types.GeneratorType): - indices = np.fromiter(indices, dtype=np.int64, count=-1) - - sample_len = 0 - sample_lens = [] - batch = [] - batches = [] - for i in range(len(indices)): - idx = indices[i] - num_tokens = num_tokens_fn(idx) - sample_lens.append(num_tokens) - sample_len = max(sample_len, num_tokens) - - assert sample_len <= max_tokens, ( - "sentence at index {} of size {} exceeds max_tokens " - "limit of {}!".format(idx, sample_len, max_tokens) - ) - num_tokens = (len(batch) + 1) * sample_len - - if _is_batch_full(batch, num_tokens, max_tokens, max_sentences): - mod_len = max( - bsz_mult * (len(batch) // bsz_mult), - len(batch) % bsz_mult, - ) - batches.append(batch[:mod_len]) - batch = batch[mod_len:] - sample_lens = sample_lens[mod_len:] - sample_len = max(sample_lens) if len(sample_lens) > 0 else 0 - batch.append(idx) - if len(batch) > 0: - batches.append(batch) - return batches - - -def unpack_dict_to_list(samples): - samples_ = [] - bsz = samples.get('outputs').size(0) - for i in range(bsz): - res = {} - for k, v in samples.items(): - try: - res[k] = v[i] - except: - pass - samples_.append(res) - return samples_ - - -def remove_padding(x, padding_idx=0): - if x is None: - return None - assert len(x.shape) in [1, 2] - if len(x.shape) == 2: # [T, H] - return x[np.abs(x).sum(-1) != padding_idx] - elif len(x.shape) == 1: # [T] - return x[x != padding_idx] - - -def data_loader(fn): - """ - Decorator to make any fx with this use the lazy property - :param fn: - :return: - """ - - wraps(fn) - attr_name = '_lazy_' + fn.__name__ - - def _get_data_loader(self): - try: - value = getattr(self, attr_name) - except AttributeError: - try: - value = fn(self) # Lazy evaluation, done only once. - except AttributeError as e: - # Guard against AttributeError suppression. (Issue #142) - traceback.print_exc() - error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e) - raise RuntimeError(error) from e - setattr(self, attr_name, value) # Memoize evaluation. 
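A simplified, single-purpose version of what collate_1d does, assuming only PyTorch; the real function additionally supports left padding, shifting, and a fixed max length:

    import torch

    def pad_1d(values, pad_idx=0):
        # right-pad every sequence with pad_idx up to the length of the longest one
        size = max(v.size(0) for v in values)
        res = values[0].new(len(values), size).fill_(pad_idx)
        for i, v in enumerate(values):
            res[i, :len(v)].copy_(v)
        return res

    batch = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
    padded = pad_1d(batch)  # tensor([[1, 2, 3], [4, 5, 0]])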
- return value - - return _get_data_loader - - -class BaseDataset(torch.utils.data.Dataset): - def __init__(self, shuffle): - super().__init__() - self.hparams = hparams - self.shuffle = shuffle - self.sort_by_len = hparams['sort_by_len'] - self.sizes = None - - @property - def _sizes(self): - return self.sizes - - def __getitem__(self, index): - raise NotImplementedError - - def collater(self, samples): - raise NotImplementedError - - def __len__(self): - return len(self._sizes) - - def num_tokens(self, index): - return self.size(index) - - def size(self, index): - """Return an example's size as a float or tuple. This value is used when - filtering a dataset with ``--max-positions``.""" - return min(self._sizes[index], hparams['max_frames']) - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.shuffle: - indices = np.random.permutation(len(self)) - if self.sort_by_len: - indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] - else: - indices = np.arange(len(self)) - return indices - - @property - def num_workers(self): - return int(os.getenv('NUM_WORKERS', hparams['ds_workers'])) - - -class BaseConcatDataset(ConcatDataset): - def collater(self, samples): - return self.datasets[0].collater(samples) - - @property - def _sizes(self): - if not hasattr(self, 'sizes'): - self.sizes = list(chain.from_iterable([d._sizes for d in self.datasets])) - return self.sizes - - def size(self, index): - return min(self._sizes[index], hparams['max_frames']) - - def num_tokens(self, index): - return self.size(index) - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.datasets[0].shuffle: - indices = np.random.permutation(len(self)) - if self.datasets[0].sort_by_len: - indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] - else: - indices = np.arange(len(self)) - return indices - - @property - def num_workers(self): - return self.datasets[0].num_workers diff --git a/spaces/YUANAI/DiffspeechResearch/utils/nn/model_utils.py b/spaces/YUANAI/DiffspeechResearch/utils/nn/model_utils.py deleted file mode 100644 index b81200e9a2629ac4d791a37d31d5f13330aefd30..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/utils/nn/model_utils.py +++ /dev/null @@ -1,14 +0,0 @@ -import numpy as np - - -def print_arch(model, model_name='model'): - print(f"| {model_name} Arch: ", model) - num_params(model, model_name=model_name) - - -def num_params(model, print_out=True, model_name="model"): - parameters = filter(lambda p: p.requires_grad, model.parameters()) - parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 - if print_out: - print(f'| {model_name} Trainable Parameters: %.3fM' % parameters) - return parameters diff --git a/spaces/Yuliang/ECON/lib/pymafx/utils/geometry.py b/spaces/Yuliang/ECON/lib/pymafx/utils/geometry.py deleted file mode 100644 index 5c2d2c1f8b22147344afc549ae74630bea56d3ef..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ECON/lib/pymafx/utils/geometry.py +++ /dev/null @@ -1,693 +0,0 @@ -import numbers - -import numpy as np -import torch -from einops.einops import rearrange -from torch.nn import functional as F - -""" -Useful geometric operations, e.g. 
Perspective projection and a differentiable Rodrigues formula -Parts of the code are taken from https://github.com/MandyMo/pytorch_HMR -""" - - -def batch_rodrigues(theta): - """Convert axis-angle representation to rotation matrix. - Args: - theta: size = [B, 3] - Returns: - Rotation matrix corresponding to the quaternion -- size = [B, 3, 3] - """ - l1norm = torch.norm(theta + 1e-8, p=2, dim=1) - angle = torch.unsqueeze(l1norm, -1) - normalized = torch.div(theta, angle) - angle = angle * 0.5 - v_cos = torch.cos(angle) - v_sin = torch.sin(angle) - quat = torch.cat([v_cos, v_sin * normalized], dim=1) - return quat_to_rotmat(quat) - - -def quat_to_rotmat(quat): - """Convert quaternion coefficients to rotation matrix. - Args: - quat: size = [B, 4] 4 <===>(w, x, y, z) - Returns: - Rotation matrix corresponding to the quaternion -- size = [B, 3, 3] - """ - norm_quat = quat - norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True) - w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3] - - B = quat.size(0) - - w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) - wx, wy, wz = w * x, w * y, w * z - xy, xz, yz = x * y, x * z, y * z - - rotMat = torch.stack([ - w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, w2 - x2 + y2 - z2, - 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2 - ], - dim=1).view(B, 3, 3) - return rotMat - - -def rotation_matrix_to_angle_axis(rotation_matrix): - """ - This function is borrowed from https://github.com/kornia/kornia - - Convert 3x4 rotation matrix to Rodrigues vector - - Args: - rotation_matrix (Tensor): rotation matrix. - - Returns: - Tensor: Rodrigues vector transformation. - - Shape: - - Input: :math:`(N, 3, 4)` - - Output: :math:`(N, 3)` - - Example: - >>> input = torch.rand(2, 3, 4) # Nx4x4 - >>> output = tgm.rotation_matrix_to_angle_axis(input) # Nx3 - """ - if rotation_matrix.shape[1:] == (3, 3): - rot_mat = rotation_matrix.reshape(-1, 3, 3) - hom = torch.tensor([0, 0, 1], dtype=torch.float32, device=rotation_matrix.device).reshape( - 1, 3, 1 - ).expand(rot_mat.shape[0], -1, -1) - rotation_matrix = torch.cat([rot_mat, hom], dim=-1) - - quaternion = rotation_matrix_to_quaternion(rotation_matrix) - aa = quaternion_to_angle_axis(quaternion) - aa[torch.isnan(aa)] = 0.0 - return aa - - -def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: - """ - This function is borrowed from https://github.com/kornia/kornia - - Convert quaternion vector to angle axis of rotation. - - Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h - - Args: - quaternion (torch.Tensor): tensor with quaternions. - - Return: - torch.Tensor: tensor with angle axis of rotation. - - Shape: - - Input: :math:`(*, 4)` where `*` means, any number of dimensions - - Output: :math:`(*, 3)` - - Example: - >>> quaternion = torch.rand(2, 4) # Nx4 - >>> angle_axis = tgm.quaternion_to_angle_axis(quaternion) # Nx3 - """ - if not torch.is_tensor(quaternion): - raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(quaternion))) - - if not quaternion.shape[-1] == 4: - raise ValueError( - "Input must be a tensor of shape Nx4 or 4. 
Got {}".format(quaternion.shape) - ) - # unpack input and compute conversion - q1: torch.Tensor = quaternion[..., 1] - q2: torch.Tensor = quaternion[..., 2] - q3: torch.Tensor = quaternion[..., 3] - sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 - - sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) - cos_theta: torch.Tensor = quaternion[..., 0] - two_theta: torch.Tensor = 2.0 * torch.where( - cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta) - ) - - k_pos: torch.Tensor = two_theta / sin_theta - k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) - k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) - - angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] - angle_axis[..., 0] += q1 * k - angle_axis[..., 1] += q2 * k - angle_axis[..., 2] += q3 * k - return angle_axis - - -def quaternion_to_angle(quaternion: torch.Tensor) -> torch.Tensor: - """ - Convert quaternion vector to angle of the rotation. - - Args: - quaternion (torch.Tensor): tensor with quaternions. - - Return: - torch.Tensor: tensor with angle axis of rotation. - - Shape: - - Input: :math:`(*, 4)` where `*` means, any number of dimensions - - Output: :math:`(*, 1)` - - Example: - >>> quaternion = torch.rand(2, 4) # Nx4 - >>> angle_axis = tgm.quaternion_to_angle(quaternion) # Nx1 - """ - if not torch.is_tensor(quaternion): - raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(quaternion))) - - if not quaternion.shape[-1] == 4: - raise ValueError( - "Input must be a tensor of shape Nx4 or 4. Got {}".format(quaternion.shape) - ) - # unpack input and compute conversion - q1: torch.Tensor = quaternion[..., 1] - q2: torch.Tensor = quaternion[..., 2] - q3: torch.Tensor = quaternion[..., 3] - sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 - - sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) - cos_theta: torch.Tensor = quaternion[..., 0] - theta: torch.Tensor = 2.0 * torch.where( - cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta) - ) - - # theta: torch.Tensor = 2.0 * torch.atan2(sin_theta, cos_theta) - - # theta2 = torch.where(sin_squared_theta > 0.0, - theta, theta) - - return theta.unsqueeze(-1) - - -def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6): - """ - This function is borrowed from https://github.com/kornia/kornia - - Convert 3x4 rotation matrix to 4d quaternion vector - - This algorithm is based on algorithm described in - https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201 - - Args: - rotation_matrix (Tensor): the rotation matrix to convert. - - Return: - Tensor: the rotation in quaternion - - Shape: - - Input: :math:`(N, 3, 4)` - - Output: :math:`(N, 4)` - - Example: - >>> input = torch.rand(4, 3, 4) # Nx3x4 - >>> output = tgm.rotation_matrix_to_quaternion(input) # Nx4 - """ - if not torch.is_tensor(rotation_matrix): - raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(rotation_matrix))) - - if len(rotation_matrix.shape) > 3: - raise ValueError( - "Input size must be a three dimensional tensor. Got {}".format(rotation_matrix.shape) - ) - # if not rotation_matrix.shape[-2:] == (3, 4): - # raise ValueError( - # "Input size must be a N x 3 x 4 tensor. 
Got {}".format( - # rotation_matrix.shape)) - - rmat_t = torch.transpose(rotation_matrix, 1, 2) - - mask_d2 = rmat_t[:, 2, 2] < eps - - mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1] - mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1] - - t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2] - q0 = torch.stack([ - rmat_t[:, 1, 2] - rmat_t[:, 2, 1], t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0], - rmat_t[:, 2, 0] + rmat_t[:, 0, 2] - ], -1) - t0_rep = t0.repeat(4, 1).t() - - t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2] - q1 = torch.stack([ - rmat_t[:, 2, 0] - rmat_t[:, 0, 2], rmat_t[:, 0, 1] + rmat_t[:, 1, 0], t1, - rmat_t[:, 1, 2] + rmat_t[:, 2, 1] - ], -1) - t1_rep = t1.repeat(4, 1).t() - - t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2] - q2 = torch.stack([ - rmat_t[:, 0, 1] - rmat_t[:, 1, 0], rmat_t[:, 2, 0] + rmat_t[:, 0, 2], - rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2 - ], -1) - t2_rep = t2.repeat(4, 1).t() - - t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2] - q3 = torch.stack([ - t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1], rmat_t[:, 2, 0] - rmat_t[:, 0, 2], - rmat_t[:, 0, 1] - rmat_t[:, 1, 0] - ], -1) - t3_rep = t3.repeat(4, 1).t() - - mask_c0 = mask_d2 * mask_d0_d1 - mask_c1 = mask_d2 * ~mask_d0_d1 - mask_c2 = ~mask_d2 * mask_d0_nd1 - mask_c3 = ~mask_d2 * ~mask_d0_nd1 - mask_c0 = mask_c0.view(-1, 1).type_as(q0) - mask_c1 = mask_c1.view(-1, 1).type_as(q1) - mask_c2 = mask_c2.view(-1, 1).type_as(q2) - mask_c3 = mask_c3.view(-1, 1).type_as(q3) - - q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3 - q /= torch.sqrt( - t0_rep * mask_c0 + t1_rep * mask_c1 + # noqa - t2_rep * mask_c2 + t3_rep * mask_c3 - ) # noqa - q *= 0.5 - return q - - -def batch_euler2matrix(r): - return quaternion_to_rotation_matrix(euler_to_quaternion(r)) - - -def euler_to_quaternion(r): - x = r[..., 0] - y = r[..., 1] - z = r[..., 2] - - z = z / 2.0 - y = y / 2.0 - x = x / 2.0 - cz = torch.cos(z) - sz = torch.sin(z) - cy = torch.cos(y) - sy = torch.sin(y) - cx = torch.cos(x) - sx = torch.sin(x) - quaternion = torch.zeros_like(r.repeat(1, 2))[..., :4].to(r.device) - quaternion[..., 0] += cx * cy * cz - sx * sy * sz - quaternion[..., 1] += cx * sy * sz + cy * cz * sx - quaternion[..., 2] += cx * cz * sy - sx * cy * sz - quaternion[..., 3] += cx * cy * sz + sx * cz * sy - return quaternion - - -def quaternion_to_rotation_matrix(quat): - """Convert quaternion coefficients to rotation matrix. - Args: - quat: size = [B, 4] 4 <===>(w, x, y, z) - Returns: - Rotation matrix corresponding to the quaternion -- size = [B, 3, 3] - """ - norm_quat = quat - norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True) - w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3] - - B = quat.size(0) - - w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) - wx, wy, wz = w * x, w * y, w * z - xy, xz, yz = x * y, x * z, y * z - - rotMat = torch.stack([ - w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, w2 - x2 + y2 - z2, - 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2 - ], - dim=1).view(B, 3, 3) - return rotMat - - -def rot6d_to_rotmat(x): - """Convert 6D rotation representation to 3x3 rotation matrix. 
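The recovery step is essentially a Gram-Schmidt orthogonalisation of two 3-D vectors; a minimal single-sample NumPy sketch of the same idea (the element ordering and batching differ in the actual function):

    import numpy as np

    def rot6d_to_rotmat_single(x6):
        a1, a2 = x6[:3], x6[3:]
        b1 = a1 / np.linalg.norm(a1)
        b2 = a2 - np.dot(b1, a2) * b1      # remove the component of a2 along b1
        b2 = b2 / np.linalg.norm(b2)
        b3 = np.cross(b1, b2)              # third column completes the right-handed frame
        return np.stack([b1, b2, b3], axis=-1)

    R = rot6d_to_rotmat_single(np.array([1., 0., 0., 0., 1., 0.]))
    # R is the identity matrix for this input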
- Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 - Input: - (B,6) Batch of 6-D rotation representations - Output: - (B,3,3) Batch of corresponding rotation matrices - """ - if x.shape[-1] == 6: - batch_size = x.shape[0] - if len(x.shape) == 3: - num = x.shape[1] - x = rearrange(x, 'b n d -> (b n) d', d=6) - else: - num = 1 - x = rearrange(x, 'b (k l) -> b k l', k=3, l=2) - # x = x.view(-1,3,2) - a1 = x[:, :, 0] - a2 = x[:, :, 1] - b1 = F.normalize(a1) - b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1) - b3 = torch.cross(b1, b2, dim=-1) - - mat = torch.stack((b1, b2, b3), dim=-1) - if num > 1: - mat = rearrange(mat, '(b n) h w-> b n h w', b=batch_size, n=num, h=3, w=3) - else: - x = x.view(-1, 3, 2) - a1 = x[:, :, 0] - a2 = x[:, :, 1] - b1 = F.normalize(a1) - b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1) - b3 = torch.cross(b1, b2, dim=-1) - mat = torch.stack((b1, b2, b3), dim=-1) - return mat - - -def rotmat_to_rot6d(x): - """Convert 3x3 rotation matrix to 6D rotation representation. - Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 - Input: - (B,3,3) Batch of corresponding rotation matrices - Output: - (B,6) Batch of 6-D rotation representations - """ - batch_size = x.shape[0] - x = x[:, :, :2] - x = x.reshape(batch_size, 6) - return x - - -def rotmat_to_angle(x): - """Convert rotation to one-D angle. - Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 - Input: - (B,2) Batch of corresponding rotation - Output: - (B,1) Batch of 1-D angle - """ - a = F.normalize(x) - angle = torch.atan2(a[:, 0], a[:, 1]).unsqueeze(-1) - - return angle - - -def projection(pred_joints, pred_camera, retain_z=False, iwp_mode=True): - """ Project 3D points on the image plane based on the given camera info, - Identity rotation and Weak Perspective (IWP) camera is used when iwp_mode=True, more about camera settings: - SPEC: Seeing People in the Wild with an Estimated Camera, ICCV 2021 - """ - - batch_size = pred_joints.shape[0] - if iwp_mode: - cam_sxy = pred_camera['cam_sxy'] - pred_cam_t = torch.stack([ - cam_sxy[:, 1], cam_sxy[:, 2], 2 * 5000. / (224. * cam_sxy[:, 0] + 1e-9) - ], - dim=-1) - - camera_center = torch.zeros(batch_size, 2) - pred_keypoints_2d = perspective_projection( - pred_joints, - rotation=torch.eye(3).unsqueeze(0).expand(batch_size, -1, -1).to(pred_joints.device), - translation=pred_cam_t, - focal_length=5000., - camera_center=camera_center, - retain_z=retain_z - ) - # # Normalize keypoints to [-1,1] - # pred_keypoints_2d = pred_keypoints_2d / (224. / 2.) 
- else: - assert type(pred_camera) is dict - - bbox_scale, bbox_center = pred_camera['bbox_scale'], pred_camera['bbox_center'] - img_w, img_h, crop_res = pred_camera['img_w'], pred_camera['img_h'], pred_camera['crop_res'] - cam_sxy, cam_rotmat, cam_intrinsics = pred_camera['cam_sxy'], pred_camera[ - 'cam_rotmat'], pred_camera['cam_intrinsics'] - if 'cam_t' in pred_camera: - cam_t = pred_camera['cam_t'] - else: - cam_t = convert_to_full_img_cam( - pare_cam=cam_sxy, - bbox_height=bbox_scale * 200., - bbox_center=bbox_center, - img_w=img_w, - img_h=img_h, - focal_length=cam_intrinsics[:, 0, 0], - ) - - pred_keypoints_2d = perspective_projection( - pred_joints, - rotation=cam_rotmat, - translation=cam_t, - cam_intrinsics=cam_intrinsics, - ) - - return pred_keypoints_2d - - -def perspective_projection( - points, - rotation, - translation, - focal_length=None, - camera_center=None, - cam_intrinsics=None, - retain_z=False -): - """ - This function computes the perspective projection of a set of points. - Input: - points (bs, N, 3): 3D points - rotation (bs, 3, 3): Camera rotation - translation (bs, 3): Camera translation - focal_length (bs,) or scalar: Focal length - camera_center (bs, 2): Camera center - """ - batch_size = points.shape[0] - if cam_intrinsics is not None: - K = cam_intrinsics - else: - # raise - K = torch.zeros([batch_size, 3, 3], device=points.device) - K[:, 0, 0] = focal_length - K[:, 1, 1] = focal_length - K[:, 2, 2] = 1. - K[:, :-1, -1] = camera_center - - # Transform points - points = torch.einsum('bij,bkj->bki', rotation, points) - points = points + translation.unsqueeze(1) - - # Apply perspective distortion - projected_points = points / points[:, :, -1].unsqueeze(-1) - - # Apply camera intrinsics - projected_points = torch.einsum('bij,bkj->bki', K, projected_points) - - if retain_z: - return projected_points - else: - return projected_points[:, :, :-1] - - -def convert_to_full_img_cam(pare_cam, bbox_height, bbox_center, img_w, img_h, focal_length): - # Converts weak perspective camera estimated by PARE in - # bbox coords to perspective camera in full image coordinates - # from https://arxiv.org/pdf/2009.06549.pdf - s, tx, ty = pare_cam[:, 0], pare_cam[:, 1], pare_cam[:, 2] - res = 224 - r = bbox_height / res - tz = 2 * focal_length / (r * res * s) - - cx = 2 * (bbox_center[:, 0] - (img_w / 2.)) / (s * bbox_height) - cy = 2 * (bbox_center[:, 1] - (img_h / 2.)) / (s * bbox_height) - - if torch.is_tensor(pare_cam): - cam_t = torch.stack([tx + cx, ty + cy, tz], dim=-1) - else: - cam_t = np.stack([tx + cx, ty + cy, tz], axis=-1) - - return cam_t - - -def estimate_translation_np(S, joints_2d, joints_conf, focal_length=5000, img_size=(224., 224.)): - """Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d. 
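To make the least-squares target concrete, this is the pinhole relation these helpers assume, written out for a single joint with the file's default focal length and a 224-px image; the numbers are illustrative:

    import numpy as np

    f, cx, cy = 5000.0, 112.0, 112.0        # default focal length, principal point at image centre
    S = np.array([0.1, -0.2, 0.0])          # a joint in root-relative 3-D coordinates
    t = np.array([0.0, 0.0, 40.0])          # the camera translation the solver tries to recover
    X, Y, Z = S + t

    # projected 2-D location that should match the observed keypoint
    u = f * X / Z + cx
    v = f * Y / Z + cy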
- Input: - S: (25, 3) 3D joint locations - joints: (25, 3) 2D joint locations and confidence - Returns: - (3,) camera translation vector - """ - - num_joints = S.shape[0] - # focal length - f = np.array([focal_length, focal_length]) - # optical center - center = np.array([img_size[1] / 2., img_size[0] / 2.]) - - # transformations - Z = np.reshape(np.tile(S[:, 2], (2, 1)).T, -1) - XY = np.reshape(S[:, 0:2], -1) - O = np.tile(center, num_joints) - F = np.tile(f, num_joints) - weight2 = np.reshape(np.tile(np.sqrt(joints_conf), (2, 1)).T, -1) - - # least squares - Q = np.array([ - F * np.tile(np.array([1, 0]), num_joints), F * np.tile(np.array([0, 1]), num_joints), - O - np.reshape(joints_2d, -1) - ]).T - c = (np.reshape(joints_2d, -1) - O) * Z - F * XY - - # weighted least squares - W = np.diagflat(weight2) - Q = np.dot(W, Q) - c = np.dot(W, c) - - # square matrix - A = np.dot(Q.T, Q) - b = np.dot(Q.T, c) - - # solution - trans = np.linalg.solve(A, b) - - return trans - - -def estimate_translation(S, joints_2d, focal_length=5000., img_size=224., use_all_kps=False): - """Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d. - Input: - S: (B, 49, 3) 3D joint locations - joints: (B, 49, 3) 2D joint locations and confidence - Returns: - (B, 3) camera translation vectors - """ - if isinstance(focal_length, numbers.Number): - focal_length = [ - focal_length, - ] * S.shape[0] - # print(len(focal_length), focal_length) - - if isinstance(img_size, numbers.Number): - img_size = [ - (img_size, img_size), - ] * S.shape[0] - # print(len(img_size), img_size) - - device = S.device - if use_all_kps: - S = S.cpu().numpy() - joints_2d = joints_2d.cpu().numpy() - else: - # Use only joints 25:49 (GT joints) - S = S[:, 25:, :].cpu().numpy() - joints_2d = joints_2d[:, 25:, :].cpu().numpy() - joints_conf = joints_2d[:, :, -1] - joints_2d = joints_2d[:, :, :-1] - trans = np.zeros((S.shape[0], 3), dtype=np.float32) - # Find the translation for each example in the batch - for i in range(S.shape[0]): - S_i = S[i] - joints_i = joints_2d[i] - conf_i = joints_conf[i] - trans[i] = estimate_translation_np( - S_i, joints_i, conf_i, focal_length=focal_length[i], img_size=img_size[i] - ) - return torch.from_numpy(trans).to(device) - - -def Rot_y(angle, category='torch', prepend_dim=True, device=None): - '''Rotate around y-axis by angle - Args: - category: 'torch' or 'numpy' - prepend_dim: prepend an extra dimension - Return: Rotation matrix with shape [1, 3, 3] (prepend_dim=True) - ''' - m = np.array([[np.cos(angle), 0., np.sin(angle)], [0., 1., 0.], - [-np.sin(angle), 0., np.cos(angle)]]) - if category == 'torch': - if prepend_dim: - return torch.tensor(m, dtype=torch.float, device=device).unsqueeze(0) - else: - return torch.tensor(m, dtype=torch.float, device=device) - elif category == 'numpy': - if prepend_dim: - return np.expand_dims(m, 0) - else: - return m - else: - raise ValueError("category must be 'torch' or 'numpy'") - - -def Rot_x(angle, category='torch', prepend_dim=True, device=None): - '''Rotate around x-axis by angle - Args: - category: 'torch' or 'numpy' - prepend_dim: prepend an extra dimension - Return: Rotation matrix with shape [1, 3, 3] (prepend_dim=True) - ''' - m = np.array([[1., 0., 0.], [0., np.cos(angle), -np.sin(angle)], - [0., np.sin(angle), np.cos(angle)]]) - if category == 'torch': - if prepend_dim: - return torch.tensor(m, dtype=torch.float, device=device).unsqueeze(0) - else: - return torch.tensor(m, dtype=torch.float, device=device) - elif category == 
'numpy': - if prepend_dim: - return np.expand_dims(m, 0) - else: - return m - else: - raise ValueError("category must be 'torch' or 'numpy'") - - -def Rot_z(angle, category='torch', prepend_dim=True, device=None): - '''Rotate around z-axis by angle - Args: - category: 'torch' or 'numpy' - prepend_dim: prepend an extra dimension - Return: Rotation matrix with shape [1, 3, 3] (prepend_dim=True) - ''' - m = np.array([[np.cos(angle), -np.sin(angle), 0.], [np.sin(angle), - np.cos(angle), 0.], [0., 0., 1.]]) - if category == 'torch': - if prepend_dim: - return torch.tensor(m, dtype=torch.float, device=device).unsqueeze(0) - else: - return torch.tensor(m, dtype=torch.float, device=device) - elif category == 'numpy': - if prepend_dim: - return np.expand_dims(m, 0) - else: - return m - else: - raise ValueError("category must be 'torch' or 'numpy'") - - -def compute_twist_rotation(rotation_matrix, twist_axis): - ''' - Compute the twist component of given rotation and twist axis - https://stackoverflow.com/questions/3684269/component-of-a-quaternion-rotation-around-an-axis - Parameters - ---------- - rotation_matrix : Tensor (B, 3, 3,) - The rotation to convert - twist_axis : Tensor (B, 3,) - The twist axis - Returns - ------- - Tensor (B, 3, 3) - The twist rotation - ''' - quaternion = rotation_matrix_to_quaternion(rotation_matrix) - - twist_axis = twist_axis / (torch.norm(twist_axis, dim=1, keepdim=True) + 1e-9) - - projection = torch.einsum('bi,bi->b', twist_axis, quaternion[:, 1:]).unsqueeze(-1) * twist_axis - - twist_quaternion = torch.cat([quaternion[:, 0:1], projection], dim=1) - twist_quaternion = twist_quaternion / (torch.norm(twist_quaternion, dim=1, keepdim=True) + 1e-9) - - twist_rotation = quaternion_to_rotation_matrix(twist_quaternion) - twist_aa = quaternion_to_angle_axis(twist_quaternion) - - twist_angle = torch.sum(twist_aa, dim=1, - keepdim=True) / torch.sum(twist_axis, dim=1, keepdim=True) - - return twist_rotation, twist_angle diff --git a/spaces/a7med146235/Ahmed/app.py b/spaces/a7med146235/Ahmed/app.py deleted file mode 100644 index 6d5f9ad1c1bb798f03c2ac5d4df09bc3bb26eb1c..0000000000000000000000000000000000000000 --- a/spaces/a7med146235/Ahmed/app.py +++ /dev/null @@ -1,22 +0,0 @@ -import gradio as gr -from fastai.vision.all import * -import skimage -import pathlib -temp = pathlib.WindowsPath -pathlib.WindowsPath = pathlib.PosixPath - -learn= load_learner('Bear Model.pkl') -def predict(img): - #img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - -examples = ['taddy.jfif', 'black.jfif','grizzly.jfif'] -labels = learn.dls.vocab -title = "Bear Classifier" -description = "Bear classifier with fastai. Created as a demo for Gradio and HuggingFace Spaces." 
-interpretation='default' -enable_queue=True - - -gr.Interface(fn=predict,inputs=gr.inputs.Image(shape=(192, 192)),outputs=gr.outputs.Label(num_top_classes=3),title=title,description=description,interpretation=interpretation,enable_queue=enable_queue,examples=examples).launch() \ No newline at end of file diff --git a/spaces/aaboutblankk/digiplay-CamelliaMix_NSFW_diffusers_v1.1/app.py b/spaces/aaboutblankk/digiplay-CamelliaMix_NSFW_diffusers_v1.1/app.py deleted file mode 100644 index 6eddb50941d72c6a8a339fd251ca0d9d7e499823..0000000000000000000000000000000000000000 --- a/spaces/aaboutblankk/digiplay-CamelliaMix_NSFW_diffusers_v1.1/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/digiplay/CamelliaMix_NSFW_diffusers_v1.1").launch() \ No newline at end of file diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/__init__.py deleted file mode 100644 index ce2930f62a0091e06b37575b96db2ae51ca7908e..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -import mmcv - -from .version import __version__, short_version - - -def digit_version(version_str): - digit_version = [] - for x in version_str.split('.'): - if x.isdigit(): - digit_version.append(int(x)) - elif x.find('rc') != -1: - patch_version = x.split('rc') - digit_version.append(int(patch_version[0]) - 1) - digit_version.append(int(patch_version[1])) - return digit_version - - -mmcv_minimum_version = '1.2.4' -mmcv_maximum_version = '1.4.0' -mmcv_version = digit_version(mmcv.__version__) - - -assert (mmcv_version >= digit_version(mmcv_minimum_version) - and mmcv_version <= digit_version(mmcv_maximum_version)), \ - f'MMCV=={mmcv.__version__} is used but incompatible. ' \ - f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' - -__all__ = ['__version__', 'short_version'] diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/input/base.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/input/base.py deleted file mode 100644 index c99f00fa7a0fd67ea475685bdb10f26388713610..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/input/base.py +++ /dev/null @@ -1,1169 +0,0 @@ -"""Interface classes for `pyglet.input`. - -.. versionadded:: 1.2 -""" - -import sys - -from pyglet.event import EventDispatcher - - -_is_pyglet_doc_run = hasattr(sys, "is_pyglet_doc_run") and sys.is_pyglet_doc_run - - -class DeviceException(Exception): - pass - - -class DeviceOpenException(DeviceException): - pass - - -class DeviceExclusiveException(DeviceException): - pass - - -class Device: - """Input device. - - :Ivariables: - display : `pyglet.canvas.Display` - Display this device is connected to. - name : str - Name of the device, as described by the device firmware. - manufacturer : str - Name of the device manufacturer, or ``None`` if the information is - not available. - """ - - def __init__(self, display, name): - self.display = display - self.name = name - self.manufacturer = None - self._is_open = False - - @property - def is_open(self): - return self._is_open - - def open(self, window=None, exclusive=False): - """Open the device to begin receiving input from it. - - :Parameters: - `window` : Window - Optional window to associate with the device. 
The behaviour - of this parameter is device and operating system dependant. - It can usually be omitted for most devices. - `exclusive` : bool - If ``True`` the device will be opened exclusively so that no - other application can use it. The method will raise - `DeviceExclusiveException` if the device cannot be opened this - way (for example, because another application has already - opened it). - """ - - if self._is_open: - raise DeviceOpenException('Device is already open.') - - self._is_open = True - - def close(self): - """Close the device. """ - self._is_open = False - - def get_controls(self): - """Get a list of controls provided by the device. - - :rtype: list of `Control` - """ - raise NotImplementedError('abstract') - - def get_guid(self): - """Get the device GUID, in SDL2 format. - - Return a str containing a unique device identification - string. This is generated from the hardware identifiers, - and is in the same format as was popularized by SDL2. - GUIDs differ between platforms, but are generally 32 - hexidecimal characters. - - :rtype: str containing the device's GUID. - """ - raise NotImplementedError('abstract') - - def __repr__(self): - return f"{self.__class__.__name__}(name={self.name})" - - -class Control(EventDispatcher): - """Single value input provided by a device. - - A control's value can be queried when the device is open. Event handlers - can be attached to the control to be called when the value changes. - - The `min` and `max` properties are provided as advertised by the - device; in some cases the control's value will be outside this range. - - :Ivariables: - `name` : str - Name of the control, or ``None`` if unknown - `raw_name` : str - Unmodified name of the control, as presented by the operating - system; or ``None`` if unknown. - `inverted` : bool - If ``True``, the value reported is actually inverted from what the - device reported; usually this is to provide consistency across - operating systems. - """ - - def __init__(self, name, raw_name=None, inverted=False): - self.name = name - self.raw_name = raw_name - self.inverted = inverted - self._value = None - - @property - def value(self): - """Current value of the control. - - The range of the value is device-dependent; for absolute controls - the range is given by ``min`` and ``max`` (however the value may exceed - this range); for relative controls the range is undefined. - - :type: float - """ - return self._value - - @value.setter - def value(self, newvalue): - if newvalue == self._value: - return - self._value = newvalue - self.dispatch_event('on_change', newvalue) - - def __repr__(self): - if self.name: - return f"{self.__class__.__name__}(name={self.name}, raw_name={self.raw_name})" - else: - return f"{self.__class__.__name__}(raw_name={self.raw_name})" - - def on_change(self, value): - """The value changed. - - :Parameters: - `value` : float - Current value of the control. - - :event: - """ - - -Control.register_event_type('on_change') - - -class RelativeAxis(Control): - """An axis whose value represents a relative change from the previous - value. - """ - - #: Name of the horizontal axis control - X = 'x' - #: Name of the vertical axis control - Y = 'y' - #: Name of the Z axis control. 
- Z = 'z' - #: Name of the rotational-X axis control - RX = 'rx' - #: Name of the rotational-Y axis control - RY = 'ry' - #: Name of the rotational-Z axis control - RZ = 'rz' - #: Name of the scroll wheel control - WHEEL = 'wheel' - - @property - def value(self): - return self._value - - @value.setter - def value(self, value): - self._value = value - self.dispatch_event('on_change', value) - - -class AbsoluteAxis(Control): - """An axis whose value represents a physical measurement from the device. - - The value is advertised to range over ``minimum`` and ``maximum``. - - :Ivariables: - `minimum` : float - Minimum advertised value. - `maximum` : float - Maximum advertised value. - """ - - #: Name of the horizontal axis control - X = 'x' - #: Name of the vertical axis control - Y = 'y' - #: Name of the Z axis control. - Z = 'z' - #: Name of the rotational-X axis control - RX = 'rx' - #: Name of the rotational-Y axis control - RY = 'ry' - #: Name of the rotational-Z axis control - RZ = 'rz' - #: Name of the hat (POV) control, when a single control enumerates all of - #: the hat's positions. - HAT = 'hat' - #: Name of the hat's (POV's) horizontal control, when the hat position is - #: described by two orthogonal controls. - HAT_X = 'hat_x' - #: Name of the hat's (POV's) vertical control, when the hat position is - #: described by two orthogonal controls. - HAT_Y = 'hat_y' - - def __init__(self, name, minimum, maximum, raw_name=None, inverted=False): - super().__init__(name, raw_name, inverted) - self.min = minimum - self.max = maximum - - -class Button(Control): - """A control whose value is boolean. """ - - @property - def value(self): - return bool(self._value) - - @value.setter - def value(self, newvalue): - if newvalue == self._value: - return - self._value = newvalue - self.dispatch_event('on_change', bool(newvalue)) - if newvalue: - self.dispatch_event('on_press') - else: - self.dispatch_event('on_release') - - if _is_pyglet_doc_run: - def on_press(self): - """The button was pressed. - - :event: - """ - - def on_release(self): - """The button was released. - - :event: - """ - - -Button.register_event_type('on_press') -Button.register_event_type('on_release') - - -class Joystick(EventDispatcher): - """High-level interface for joystick-like devices. This includes a wide range - of analog and digital joysticks, gamepads, controllers, and possibly even - steering wheels and other input devices. There is unfortunately no easy way to - distinguish between most of these different device types. - - For a simplified subset of Joysticks, see the :py:class:`~pyglet.input.Controller` - interface. This covers a variety of popular game console controllers. Unlike - Joysticks, Controllers have strictly defined layouts and inputs. - - To use a joystick, first call `open`, then in your game loop examine - the values of `x`, `y`, and so on. These values are normalized to the - range [-1.0, 1.0]. - - To receive events when the value of an axis changes, attach an - on_joyaxis_motion event handler to the joystick. The :py:class:`~pyglet.input.Joystick` instance, - axis name, and current value are passed as parameters to this event. - - To handle button events, you should attach on_joybutton_press and - on_joy_button_release event handlers to the joystick. Both the :py:class:`~pyglet.input.Joystick` - instance and the index of the changed button are passed as parameters to - these events. 
- - Alternately, you may attach event handlers to each individual button in - `button_controls` to receive on_press or on_release events. - - To use the hat switch, attach an on_joyhat_motion event handler to the - joystick. The handler will be called with both the hat_x and hat_y values - whenever the value of the hat switch changes. - - The device name can be queried to get the name of the joystick. - - :Ivariables: - `device` : `Device` - The underlying device used by this joystick interface. - `x` : float - Current X (horizontal) value ranging from -1.0 (left) to 1.0 - (right). - `y` : float - Current y (vertical) value ranging from -1.0 (top) to 1.0 - (bottom). - `z` : float - Current Z value ranging from -1.0 to 1.0. On joysticks the Z - value is usually the throttle control. On controllers the Z - value is usually the secondary thumb vertical axis. - `rx` : float - Current rotational X value ranging from -1.0 to 1.0. - `ry` : float - Current rotational Y value ranging from -1.0 to 1.0. - `rz` : float - Current rotational Z value ranging from -1.0 to 1.0. On joysticks - the RZ value is usually the twist of the stick. On game - controllers the RZ value is usually the secondary thumb horizontal - axis. - `hat_x` : int - Current hat (POV) horizontal position; one of -1 (left), 0 - (centered) or 1 (right). - `hat_y` : int - Current hat (POV) vertical position; one of -1 (bottom), 0 - (centered) or 1 (top). - `buttons` : list of bool - List of boolean values representing current states of the buttons. - These are in order, so that button 1 has value at ``buttons[0]``, - and so on. - `x_control` : `AbsoluteAxis` - Underlying control for `x` value, or ``None`` if not available. - `y_control` : `AbsoluteAxis` - Underlying control for `y` value, or ``None`` if not available. - `z_control` : `AbsoluteAxis` - Underlying control for `z` value, or ``None`` if not available. - `rx_control` : `AbsoluteAxis` - Underlying control for `rx` value, or ``None`` if not available. - `ry_control` : `AbsoluteAxis` - Underlying control for `ry` value, or ``None`` if not available. - `rz_control` : `AbsoluteAxis` - Underlying control for `rz` value, or ``None`` if not available. - `hat_x_control` : `AbsoluteAxis` - Underlying control for `hat_x` value, or ``None`` if not available. - `hat_y_control` : `AbsoluteAxis` - Underlying control for `hat_y` value, or ``None`` if not available. - `button_controls` : list of `Button` - Underlying controls for `buttons` values. 
- """ - - def __init__(self, device): - self.device = device - - self.x = 0 - self.y = 0 - self.z = 0 - self.rx = 0 - self.ry = 0 - self.rz = 0 - self.hat_x = 0 - self.hat_y = 0 - self.buttons = [] - - self.x_control = None - self.y_control = None - self.z_control = None - self.rx_control = None - self.ry_control = None - self.rz_control = None - self.hat_x_control = None - self.hat_y_control = None - self.button_controls = [] - - def add_axis(control): - name = control.name - scale = 2.0 / (control.max - control.min) - bias = -1.0 - control.min * scale - if control.inverted: - scale = -scale - bias = -bias - setattr(self, name + '_control', control) - - @control.event - def on_change(value): - normalized_value = value * scale + bias - setattr(self, name, normalized_value) - self.dispatch_event('on_joyaxis_motion', self, name, normalized_value) - - def add_button(control): - i = len(self.buttons) - self.buttons.append(False) - self.button_controls.append(control) - - @control.event - def on_change(value): - self.buttons[i] = value - - @control.event - def on_press(): - self.dispatch_event('on_joybutton_press', self, i) - - @control.event - def on_release(): - self.dispatch_event('on_joybutton_release', self, i) - - def add_hat(control): - # 8-directional hat encoded as a single control (Windows/Mac) - self.hat_x_control = control - self.hat_y_control = control - - @control.event - def on_change(value): - if value & 0xffff == 0xffff: - self.hat_x = self.hat_y = 0 - else: - if control.max > 8: # DirectInput: scale value - value //= 0xfff - if 0 <= value < 8: - self.hat_x, self.hat_y = (( 0, 1), - ( 1, 1), - ( 1, 0), - ( 1, -1), - ( 0, -1), - (-1, -1), - (-1, 0), - (-1, 1))[value] - else: - # Out of range - self.hat_x = self.hat_y = 0 - self.dispatch_event('on_joyhat_motion', self, self.hat_x, self.hat_y) - - for control in device.get_controls(): - if isinstance(control, AbsoluteAxis): - if control.name in ('x', 'y', 'z', 'rx', 'ry', 'rz', 'hat_x', 'hat_y'): - add_axis(control) - elif control.name == 'hat': - add_hat(control) - elif isinstance(control, Button): - add_button(control) - - def open(self, window=None, exclusive=False): - """Open the joystick device. See `Device.open`. """ - self.device.open(window, exclusive) - - def close(self): - """Close the joystick device. See `Device.close`. """ - self.device.close() - - def on_joyaxis_motion(self, joystick, axis, value): - """The value of a joystick axis changed. - - :Parameters: - `joystick` : `Joystick` - The joystick device whose axis changed. - `axis` : string - The name of the axis that changed. - `value` : float - The current value of the axis, normalized to [-1, 1]. - """ - - def on_joybutton_press(self, joystick, button): - """A button on the joystick was pressed. - - :Parameters: - `joystick` : `Joystick` - The joystick device whose button was pressed. - `button` : int - The index (in `button_controls`) of the button that was pressed. - """ - - def on_joybutton_release(self, joystick, button): - """A button on the joystick was released. - - :Parameters: - `joystick` : `Joystick` - The joystick device whose button was released. - `button` : int - The index (in `button_controls`) of the button that was released. - """ - - def on_joyhat_motion(self, joystick, hat_x, hat_y): - """The value of the joystick hat switch changed. - - :Parameters: - `joystick` : `Joystick` - The joystick device whose hat control changed. - `hat_x` : int - Current hat (POV) horizontal position; one of -1 (left), 0 - (centered) or 1 (right). 
- `hat_y` : int - Current hat (POV) vertical position; one of -1 (bottom), 0 - (centered) or 1 (top). - """ - - def __repr__(self): - return f"Joystick(device={self.device.name})" - - -Joystick.register_event_type('on_joyaxis_motion') -Joystick.register_event_type('on_joybutton_press') -Joystick.register_event_type('on_joybutton_release') -Joystick.register_event_type('on_joyhat_motion') - - -class Controller(EventDispatcher): - - __slots__ = ('device', 'guid', '_mapping', 'name', 'a', 'b', 'x', 'y', - 'back', 'start', 'guide', 'leftshoulder', 'rightshoulder', - 'leftstick', 'rightstick', 'lefttrigger', 'righttrigger', - 'leftx', 'lefty', 'rightx', 'righty', 'dpup', 'dpdown', 'dpleft', - 'dpright', '_button_controls', '_axis_controls', '_hat_control', - '_hat_x_control', '_hat_y_control') - - def __init__(self, device, mapping): - """High-level interface for Game Controllers. - - Unlike Joysticks, Controllers have a strictly defined set of inputs - that matches the layout of popular home video game console Controllers. - This includes a variety of face and shoulder buttons, analog sticks and - triggers, a directional pad, and optional rumble (force feedback) - effects. - - To use a Controller, you must first call `open`. Controllers will then - dispatch a variety of events whenever the inputs change. They can also - be polled at any time to find the current value of any inputs. Analog - inputs are normalized to the range [-1.0, 1.0]. - - :note: A running application event loop is required - - The following event types are dispatched: - `on_button_press` - `on_button_release` - `on_stick_motion` - `on_dpad_motion` - `on_trigger_motion` - - The device name can be queried to get the name of the joystick. - - :Ivariables: - `device` : `Device` - The underlying device used by this joystick interface. - `name` : str - The name of the Controller as reported by the OS. - `guid` : str - The unique device identification string, in SDL2 format. - `a` : bool - `b` : bool - `x` : bool - `x` : bool - `back` : bool - `start` : bool - `guide` : bool - `leftshoulder` : bool - `rightshoulder` : bool - `leftstick` : bool - `rightstick` : bool - `leftx` : float - `lefty` : float - `rightx` : float - `righty` : float - `lefttrigger` : float - `righttrigger` : float - `dpup` : bool - `dpdown` : bool - `dpleft` : bool - `dpright` : bool - - .. 
versionadded:: 2.0 - """ - - self.device = device - self._mapping = mapping - - self.name = mapping.get('name') - self.guid = mapping.get('guid') - - self.a = False - self.b = False - self.x = False - self.y = False - self.back = False - self.start = False - self.guide = False - self.leftshoulder = False - self.rightshoulder = False - self.leftstick = False # stick press button - self.rightstick = False # stick press button - self.lefttrigger = 0 - self.righttrigger = 0 - self.leftx = 0 - self.lefty = 0 - self.rightx = 0 - self.righty = 0 - self.dpup = False - self.dpdown = False - self.dpleft = False - self.dpright = False - - self._button_controls = [] - self._axis_controls = [] - self._hat_control = None - self._hat_x_control = None - self._hat_y_control = None - - self._initialize_controls() - - def _initialize_controls(self): - - def add_axis(control, axis_name): - tscale = 1.0 / (control.max - control.min) - scale = 2.0 / (control.max - control.min) - bias = -1.0 - control.min * scale - if control.inverted: - scale = -scale - bias = -bias - - if axis_name in ("dpup", "dpdown"): - @control.event - def on_change(value): - normalized_value = value * scale + bias - self.dpup = self.dpdown = False - if normalized_value > 0.1: - self.dpup = True - if normalized_value < -0.1: - self.dpdown = True - self.dispatch_event('on_dpad_motion', self, - self.dpleft, self.dpright, self.dpup, self.dpdown) - - elif axis_name in ("dpleft", "dpright"): - @control.event - def on_change(value): - normalized_value = value * scale + bias - self.dpleft = self.dpright = False - if normalized_value > 0.1: - self.dpright = True - if normalized_value < -0.1: - self.dpleft = True - self.dispatch_event('on_dpad_motion', self, self.dpleft, self.dpright, self.dpup, self.dpdown) - - elif axis_name in ("lefttrigger", "righttrigger"): - @control.event - def on_change(value): - normalized_value = value * tscale - setattr(self, axis_name, normalized_value) - self.dispatch_event('on_trigger_motion', self, axis_name, normalized_value) - - elif axis_name in ("leftx", "lefty"): - @control.event - def on_change(value): - normalized_value = value * scale + bias - setattr(self, axis_name, normalized_value) - self.dispatch_event('on_stick_motion', self, - "leftstick", self.leftx, -self.lefty) - - elif axis_name in ("rightx", "righty"): - @control.event - def on_change(value): - normalized_value = value * scale + bias - setattr(self, axis_name, normalized_value) - self.dispatch_event('on_stick_motion', self, - "rightstick", self.rightx, -self.righty) - - def add_button(control, button_name): - if button_name in ("dpleft", "dpright", "dpup", "dpdown"): - @control.event - def on_change(value): - setattr(self, button_name, value) - self.dispatch_event('on_dpad_motion', self, - self.dpleft, self.dpright, self.dpup, self.dpdown) - else: - @control.event - def on_change(value): - setattr(self, button_name, value) - - @control.event - def on_press(): - self.dispatch_event('on_button_press', self, button_name) - - @control.event - def on_release(): - self.dispatch_event('on_button_release', self, button_name) - - def add_dedicated_hat(control): - # 8-directional hat encoded as a single control (Windows/Mac) - @control.event - def on_change(value): - if value & 0xffff == 0xffff: - self.dpleft = self.dpright = self.dpup = self.dpdown = False - else: - if control.max > 8: # DirectInput: scale value - value //= 0xfff - if 0 <= value < 8: - self.dpleft, self.dpright, self.dpup, self.dpdown = ( - (False, False, True, False), - (False, True, 
True, False), - (False, True, False, False), - (False, True, False, True), - (False, False, False, True), - (True, False, False, True), - (True, False, False, False), - (True, False, True, False))[value] - else: - # Out of range - self.dpleft = self.dpright = self.dpup = self.dpdown = False - self.dispatch_event('on_dpad_motion', self, - self.dpleft, self.dpright, self.dpup, self.dpdown) - - for control in self.device.get_controls(): - """Categorize the various control types""" - if isinstance(control, Button): - self._button_controls.append(control) - - elif isinstance(control, AbsoluteAxis): - if control.name in ('x', 'y', 'z', 'rx', 'ry', 'rz'): - self._axis_controls.append(control) - elif control.name == "hat_x": - self._hat_x_control = control - elif control.name == "hat_y": - self._hat_y_control = control - elif control.name == "hat": - self._hat_control = control - - for name, relation in self._mapping.items(): - - if relation is None or type(relation) is str: - continue - - if relation.control_type == "button": - try: - add_button(self._button_controls[relation.index], name) - except IndexError: - continue - elif relation.control_type == "axis": - try: - add_axis(self._axis_controls[relation.index], name) - except IndexError: - continue - elif relation.control_type == "hat0": - if self._hat_control: - # TODO: test this on Windows/Mac. - add_dedicated_hat(self._hat_control) - else: - if relation.index == 1: # 1 == UP - add_axis(self._hat_y_control, "dpup") - elif relation.index == 2: # 2 == RIGHT - add_axis(self._hat_x_control, "dpright") - elif relation.index == 4: # 4 == DOWN - add_axis(self._hat_y_control, "dpdown") - elif relation.index == 8: # 8 == LEFT - add_axis(self._hat_x_control, "dpleft") - - def open(self, window=None, exclusive=False): - """Open the controller. See `Device.open`. """ - self.device.open(window, exclusive) - - def close(self): - """Close the controller. See `Device.close`. """ - self.device.close() - - # Rumble (force feedback) methods: - - def rumble_play_weak(self, strength=1.0, duration=0.5): - """Play a rumble effect on the weak motor. - - :Parameters: - `strength` : float - The strength of the effect, from 0 to 1. - `duration` : float - The duration of the effect in seconds. - """ - - def rumble_play_strong(self, strength=1.0, duration=0.5): - """Play a rumble effect on the strong motor. - - :Parameters: - `strength` : float - The strength of the effect, from 0 to 1. - `duration` : float - The duration of the effect in seconds. - """ - - def rumble_stop_weak(self): - """Stop playing rumble effects on the weak motor.""" - - def rumble_stop_strong(self): - """Stop playing rumble effects on the strong motor.""" - - # Input Event types: - - def on_stick_motion(self, controller, stick, xvalue, yvalue): - """The value of a controller analogue stick changed. - - :Parameters: - `controller` : `Controller` - The controller whose analogue stick changed. - `stick` : string - The name of the stick that changed. - `xvalue` : float - The current X axis value, normalized to [-1, 1]. - `yvalue` : float - The current Y axis value, normalized to [-1, 1]. - """ - - def on_dpad_motion(self, controller, dpleft, dpright, dpup, dpdown): - """The direction pad of the controller changed. - - :Parameters: - `controller` : `Controller` - The controller whose hat control changed. - `dpleft` : boolean - True if left is pressed on the directional pad. - `dpright` : boolean - True if right is pressed on the directional pad. 
- `dpup` : boolean - True if up is pressed on the directional pad. - `dpdown` : boolean - True if down is pressed on the directional pad. - """ - - def on_trigger_motion(self, controller, trigger, value): - """The value of a controller analogue stick changed. - - :Parameters: - `controller` : `Controller` - The controller whose analogue stick changed. - `trigger` : string - The name of the trigger that changed. - `value` : float - The current value of the trigger, normalized to [-1, 1]. - """ - - def on_button_press(self, controller, button): - """A button on the controller was pressed. - - :Parameters: - `controller` : :py:class:`Controller` - The controller whose button was pressed. - `button` : string - The name of the button that was pressed. - """ - - def on_button_release(self, controller, button): - """A button on the joystick was released. - - :Parameters: - `controller` : `Controller` - The controller whose button was released. - `button` : string - The name of the button that was released. - """ - - def __repr__(self): - return f"Controller(name={self.name})" - - -Controller.register_event_type('on_button_press') -Controller.register_event_type('on_button_release') -Controller.register_event_type('on_stick_motion') -Controller.register_event_type('on_dpad_motion') -Controller.register_event_type('on_trigger_motion') - - -class AppleRemote(EventDispatcher): - """High-level interface for Apple remote control. - - This interface provides access to the 6 button controls on the remote. - Pressing and holding certain buttons on the remote is interpreted as - a separate control. - - :Ivariables: - `device` : `Device` - The underlying device used by this interface. - `left_control` : `Button` - Button control for the left (prev) button. - `left_hold_control` : `Button` - Button control for holding the left button (rewind). - `right_control` : `Button` - Button control for the right (next) button. - `right_hold_control` : `Button` - Button control for holding the right button (fast forward). - `up_control` : `Button` - Button control for the up (volume increase) button. - `down_control` : `Button` - Button control for the down (volume decrease) button. - `select_control` : `Button` - Button control for the select (play/pause) button. - `select_hold_control` : `Button` - Button control for holding the select button. - `menu_control` : `Button` - Button control for the menu button. - `menu_hold_control` : `Button` - Button control for holding the menu button. - """ - - def __init__(self, device): - def add_button(control): - setattr(self, control.name + '_control', control) - - @control.event - def on_press(): - self.dispatch_event('on_button_press', control.name) - - @control.event - def on_release(): - self.dispatch_event('on_button_release', control.name) - - self.device = device - for control in device.get_controls(): - if control.name in ('left', 'left_hold', 'right', 'right_hold', 'up', 'down', - 'menu', 'select', 'menu_hold', 'select_hold'): - add_button(control) - - def open(self, window=None, exclusive=False): - """Open the device. See `Device.open`. """ - self.device.open(window, exclusive) - - def close(self): - """Close the device. See `Device.close`. """ - self.device.close() - - def on_button_press(self, button): - """A button on the remote was pressed. - - Only the 'up' and 'down' buttons will generate an event when the - button is first pressed. 
All other buttons on the remote will wait - until the button is released and then send both the press and release - events at the same time. - - :Parameters: - `button` : unicode - The name of the button that was pressed. The valid names are - 'up', 'down', 'left', 'right', 'left_hold', 'right_hold', - 'menu', 'menu_hold', 'select', and 'select_hold' - - :event: - """ - - def on_button_release(self, button): - """A button on the remote was released. - - The 'select_hold' and 'menu_hold' button release events are sent - immediately after the corresponding press events regardless of - whether the user has released the button. - - :Parameters: - `button` : str - The name of the button that was released. The valid names are - 'up', 'down', 'left', 'right', 'left_hold', 'right_hold', - 'menu', 'menu_hold', 'select', and 'select_hold' - - :event: - """ - - -AppleRemote.register_event_type('on_button_press') -AppleRemote.register_event_type('on_button_release') - - -class Tablet: - """High-level interface to tablet devices. - - Unlike other devices, tablets must be opened for a specific window, - and cannot be opened exclusively. The `open` method returns a - `TabletCanvas` object, which supports the events provided by the tablet. - - Currently only one tablet device can be used, though it can be opened on - multiple windows. If more than one tablet is connected, the behaviour is - undefined. - """ - - def open(self, window): - """Open a tablet device for a window. - - :Parameters: - `window` : `Window` - The window on which the tablet will be used. - - :rtype: `TabletCanvas` - """ - raise NotImplementedError('abstract') - - -class TabletCanvas(EventDispatcher): - """Event dispatcher for tablets. - - Use `Tablet.open` to obtain this object for a particular tablet device and - window. Events may be generated even if the tablet stylus is outside of - the window; this is operating-system dependent. - - The events each provide the `TabletCursor` that was used to generate the - event; for example, to distinguish between a stylus and an eraser. Only - one cursor can be used at a time, otherwise the results are undefined. - - :Ivariables: - `window` : Window - The window on which this tablet was opened. - """ - # OS X: Active window receives tablet events only when cursor is in window - # Windows: Active window receives all tablet events - # - # Note that this means enter/leave pairs are not always consistent (normal - # usage). - - def __init__(self, window): - self.window = window - - def close(self): - """Close the tablet device for this window. - """ - raise NotImplementedError('abstract') - - if _is_pyglet_doc_run: - def on_enter(self, cursor): - """A cursor entered the proximity of the window. The cursor may - be hovering above the tablet surface, but outside of the window - bounds, or it may have entered the window bounds. - - Note that you cannot rely on `on_enter` and `on_leave` events to - be generated in pairs; some events may be lost if the cursor was - out of the window bounds at the time. - - :Parameters: - `cursor` : `TabletCursor` - The cursor that entered proximity. - - :event: - """ - - def on_leave(self, cursor): - """A cursor left the proximity of the window. The cursor may have - moved too high above the tablet surface to be detected, or it may - have left the bounds of the window. - - Note that you cannot rely on `on_enter` and `on_leave` events to - be generated in pairs; some events may be lost if the cursor was - out of the window bounds at the time. 
- - :Parameters: - `cursor` : `TabletCursor` - The cursor that left proximity. - - :event: - """ - - def on_motion(self, cursor, x, y, pressure, tilt_x, tilt_y, buttons): - """The cursor moved on the tablet surface. - - If `pressure` is 0, then the cursor is actually hovering above the - tablet surface, not in contact. - - :Parameters: - `cursor` : `TabletCursor` - The cursor that moved. - `x` : int - The X position of the cursor, in window coordinates. - `y` : int - The Y position of the cursor, in window coordinates. - `pressure` : float - The pressure applied to the cursor, in range 0.0 (no - pressure) to 1.0 (full pressure). - `tilt_x` : float - Currently undefined. - `tilt_y` : float - Currently undefined. - `buttons` : int - Button state may be provided if the platform supports it. - Supported on: Windows - - :event: - """ - - -TabletCanvas.register_event_type('on_enter') -TabletCanvas.register_event_type('on_leave') -TabletCanvas.register_event_type('on_motion') - - -class TabletCursor: - """A distinct cursor used on a tablet. - - Most tablets support at least a *stylus* and an *erasor* cursor; this - object is used to distinguish them when tablet events are generated. - - :Ivariables: - `name` : str - Name of the cursor. - """ - - # TODO well-defined names for stylus and eraser. - - def __init__(self, name): - self.name = name - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, self.name) - - -class ControllerManager(EventDispatcher): - """High level interface for managing game Controllers. - - This class provides a convenient way to handle the - connection and disconnection of devices. A list of all - connected Controllers can be queried at any time with the - `get_controllers` method. For hot-plugging, events are - dispatched for `on_connect` and `on_disconnect`. - To use the ControllerManager, first make an instance:: - - controller_man = pyglet.input.ControllerManager() - - At the start of your game, query for any Controllers - that are already connected:: - - controllers = controller_man.get_controllers() - - To handle Controllers that are connected or disconnected - after the start of your game, register handlers for the - appropriate events:: - - @controller_man.event - def on_connect(controller): - # code to handle newly connected - # (or re-connected) controllers - controller.open() - print("Connect:", controller) - - @controller_man.event - def on_disconnect(controller): - # code to handle disconnected Controller - print("Disconnect:", controller) - - .. versionadded:: 1.2 - """ - - def get_controllers(self): - """Get a list of all connected Controllers - - :rtype: list of :py:class:`Controller` - """ - raise NotImplementedError - - def on_connect(self, controller): - """A Controller has been connected. If this is - a previously dissconnected Controller that is - being re-connected, the same Controller instance - will be returned. - - :Parameters: - `controller` : :py:class:`Controller` - An un-opened Controller instance. - - :event: - """ - - def on_disconnect(self, controller): - """A Controller has been disconnected. - - :Parameters: - `controller` : :py:class:`Controller` - An un-opened Controller instance. 
- - :event: - """ - - -ControllerManager.register_event_type('on_connect') -ControllerManager.register_event_type('on_disconnect') diff --git a/spaces/abyildirim/inst-inpaint/constants.py b/spaces/abyildirim/inst-inpaint/constants.py deleted file mode 100644 index eba3c629b0cd43f2b12c8b6bce5cd97f1be6b81d..0000000000000000000000000000000000000000 --- a/spaces/abyildirim/inst-inpaint/constants.py +++ /dev/null @@ -1,25 +0,0 @@ -TITLE = "Inst-Inpaint: Instructing to Remove Objects with Diffusion Models" - -DESCRIPTION = """ -

- Project Page |
- Paper |
- GitHub Repo |
- This demo demonstrates the Inst-Inpaint's abilities for instruction-based image inpainting.
      -""" - -EXAMPLES = [ - ["examples/kite-boy.png", "Remove the colorful kite", True], - ["examples/cat-car.jpg", "Remove the car", True], - ["examples/bus-tree.jpg", "Remove the red bus", True], - ["examples/cups.webp", "Remove the cup at the left", True], - ["examples/woman-fantasy.jpg", "Remove the woman", True], - ["examples/clock.png", "Remove the round clock at the center", True], - ["examples/woman.png", "Remove the woman at the left", True], - ["examples/men.png", "Remove the man at the right", True], - ["examples/tree.png", "Remove the tree", True], - ["examples/birds.png", "Remove the bird at the right of the bird", True] -] diff --git a/spaces/ajsda/newbing/Dockerfile b/spaces/ajsda/newbing/Dockerfile deleted file mode 100644 index 3698c7cb7938e025afc53b18a571ae2961fbdffe..0000000000000000000000000000000000000000 --- a/spaces/ajsda/newbing/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Stage -# Use golang:alpine as the base image for the build stage -FROM golang:alpine AS builder - -# Add git so the project can be cloned from GitHub later -RUN apk --no-cache add git - -# Clone the go-proxy-bingai project from GitHub into /workspace/app -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -# Set the working directory to the cloned project directory -WORKDIR /workspace/app - -# Build the Go project. -ldflags="-s -w" reduces the size of the compiled binary -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# Use the lightweight alpine image as the runtime base image -FROM alpine - -# Set the working directory -WORKDIR /workspace/app - -# Copy the compiled binary from the build stage into the runtime image -COPY --from=builder /workspace/app/go-proxy-bingai . - -# Set an environment variable; the value here is a random string -ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO" - -# Expose port 8080 -EXPOSE 8080 - -# Command to run when the container starts -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/akhaliq/Detic/detic/modeling/utils.py b/spaces/akhaliq/Detic/detic/modeling/utils.py deleted file mode 100644 index 297fb469a049d3df2a4aa730e09c9919b4c4ca3c..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Detic/detic/modeling/utils.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import torch -import json -import numpy as np -from torch.nn import functional as F - -def load_class_freq( - path='datasets/metadata/lvis_v1_train_cat_info.json', freq_weight=1.0): - cat_info = json.load(open(path, 'r')) - cat_info = torch.tensor( - [c['image_count'] for c in sorted(cat_info, key=lambda x: x['id'])]) - freq_weight = cat_info.float() ** freq_weight - return freq_weight - - -def get_fed_loss_inds(gt_classes, num_sample_cats, C, weight=None): - appeared = torch.unique(gt_classes) # C' - prob = appeared.new_ones(C + 1).float() - prob[-1] = 0 - if len(appeared) < num_sample_cats: - if weight is not None: - prob[:C] = weight.float().clone() - prob[appeared] = 0 - more_appeared = torch.multinomial( - prob, num_sample_cats - len(appeared), - replacement=False) - appeared = torch.cat([appeared, more_appeared]) - return appeared - - - -def reset_cls_test(model, cls_path, num_classes): - model.roi_heads.num_classes = num_classes - if type(cls_path) == str: - print('Resetting zs_weight', cls_path) - zs_weight = torch.tensor( - np.load(cls_path), - dtype=torch.float32).permute(1, 0).contiguous() # D x C - else: - zs_weight = cls_path - zs_weight = torch.cat( - [zs_weight, zs_weight.new_zeros((zs_weight.shape[0], 1))], - dim=1) # D x (C + 1) - if model.roi_heads.box_predictor[0].cls_score.norm_weight: - zs_weight = F.normalize(zs_weight, p=2, dim=0) - zs_weight = zs_weight.to(model.device) - for k in range(len(model.roi_heads.box_predictor)): - del model.roi_heads.box_predictor[k].cls_score.zs_weight - model.roi_heads.box_predictor[k].cls_score.zs_weight = zs_weight \ No newline at end of file diff --git a/spaces/akhaliq/Music_Source_Separation/bytesep/optimizers/__init__.py b/spaces/akhaliq/Music_Source_Separation/bytesep/optimizers/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/akhaliq/Music_Source_Separation/scripts/1_pack_audios_to_hdf5s/instruments_solo/symphony/sr=44100,chn=2.sh b/spaces/akhaliq/Music_Source_Separation/scripts/1_pack_audios_to_hdf5s/instruments_solo/symphony/sr=44100,chn=2.sh deleted file mode 100644 index b3ba173d0a84ec895cfa6c131c75fa469191eb96..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Music_Source_Separation/scripts/1_pack_audios_to_hdf5s/instruments_solo/symphony/sr=44100,chn=2.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -INSTRUMENTS_SOLO_DATASET_DIR=${1:-"./datasets/instruments_solo"} # The first argument is dataset directory. -WORKSPACE=${2:-"./workspaces/bytesep"} # The second argument is workspace directory. - -echo "INSTRUMENTS_SOLO_DATASET_DIR=${INSTRUMENTS_SOLO_DATASET_DIR}" -echo "WORKSPACE=${WORKSPACE}" - -# Users can change the following settings. 
-SAMPLE_RATE=44100 -CHANNELS=2 - -INSTRUMENT="symphony" - -# Paths -SUB_DATASET_DIR="${INSTRUMENTS_SOLO_DATASET_DIR}/${INSTRUMENT}_solo/v0.1" - -HDF5S_DIR="${WORKSPACE}/hdf5s/instruments_solo/${INSTRUMENT}/sr=${SAMPLE_RATE}_chn=${CHANNELS}/train" - -python3 bytesep/dataset_creation/pack_audios_to_hdf5s/instruments_solo.py \ - --dataset_dir=$SUB_DATASET_DIR \ - --split="train" \ - --source_type=$INSTRUMENT \ - --hdf5s_dir=$HDF5S_DIR \ - --sample_rate=$SAMPLE_RATE \ - --channels=$CHANNELS \ No newline at end of file diff --git a/spaces/akiraaaaaa/Waifu-Reina/infer_pack/commons.py b/spaces/akiraaaaaa/Waifu-Reina/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/akiraaaaaa/Waifu-Reina/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, 
device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/alan-chen-intel/dagan-demo/depth/depth_decoder.py b/spaces/alan-chen-intel/dagan-demo/depth/depth_decoder.py deleted file mode 100644 index efbdaf73ee199f8d0ca7a6b75b29f82b1711c56a..0000000000000000000000000000000000000000 --- a/spaces/alan-chen-intel/dagan-demo/depth/depth_decoder.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. 
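# --- Illustrative aside (added for clarity, not part of any file in this diff) ---
# A minimal sketch of what the commons.sequence_mask helper above computes: a boolean
# mask marking the valid positions of each sequence in a padded batch, given
# per-example lengths. Shapes follow the helper's own signature; the example values
# below are assumptions chosen only for illustration.
import torch

lengths = torch.tensor([3, 5])                        # two sequences of length 3 and 5
max_len = int(lengths.max())
positions = torch.arange(max_len)                     # [0, 1, 2, 3, 4]
mask = positions.unsqueeze(0) < lengths.unsqueeze(1)  # broadcast compare -> (2, 5) bool mask
# mask == [[True, True, True, False, False],
#          [True, True, True, True,  True ]]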
- -from __future__ import absolute_import, division, print_function - -import numpy as np -import torch -import torch.nn as nn - -from collections import OrderedDict -from depth.layers import * - - -class DepthDecoder(nn.Module): - def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True): - super(DepthDecoder, self).__init__() - - self.num_output_channels = num_output_channels - self.use_skips = use_skips - self.upsample_mode = 'nearest' - self.scales = scales - - self.num_ch_enc = num_ch_enc - self.num_ch_dec = np.array([16, 32, 64, 128, 256]) - - # decoder - self.convs = OrderedDict() - for i in range(4, -1, -1): - # upconv_0 - num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1] - num_ch_out = self.num_ch_dec[i] - self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out) - - # upconv_1 - num_ch_in = self.num_ch_dec[i] - if self.use_skips and i > 0: - num_ch_in += self.num_ch_enc[i - 1] - num_ch_out = self.num_ch_dec[i] - self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out) - - for s in self.scales: - self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels) - - self.decoder = nn.ModuleList(list(self.convs.values())) - self.sigmoid = nn.Sigmoid() - - def forward(self, input_features): - self.outputs = {} - - # decoder - x = input_features[-1] - for i in range(4, -1, -1): - x = self.convs[("upconv", i, 0)](x) - x = [upsample(x)] - if self.use_skips and i > 0: - x += [input_features[i - 1]] - x = torch.cat(x, 1) - x = self.convs[("upconv", i, 1)](x) - if i in self.scales: - self.outputs[("disp", i)] = self.sigmoid(self.convs[("dispconv", i)](x)) - - return self.outputs diff --git a/spaces/algomuffin/jojo_fork/e4e/scripts/inference.py b/spaces/algomuffin/jojo_fork/e4e/scripts/inference.py deleted file mode 100644 index 185b9b34db85dcd97b9793bd5dbfc9d1ca046549..0000000000000000000000000000000000000000 --- a/spaces/algomuffin/jojo_fork/e4e/scripts/inference.py +++ /dev/null @@ -1,133 +0,0 @@ -import argparse - -import torch -import numpy as np -import sys -import os -import dlib - -sys.path.append(".") -sys.path.append("..") - -from configs import data_configs, paths_config -from datasets.inference_dataset import InferenceDataset -from torch.utils.data import DataLoader -from utils.model_utils import setup_model -from utils.common import tensor2im -from utils.alignment import align_face -from PIL import Image - - -def main(args): - net, opts = setup_model(args.ckpt, device) - is_cars = 'cars_' in opts.dataset_type - generator = net.decoder - generator.eval() - args, data_loader = setup_data_loader(args, opts) - - # Check if latents exist - latents_file_path = os.path.join(args.save_dir, 'latents.pt') - if os.path.exists(latents_file_path): - latent_codes = torch.load(latents_file_path).to(device) - else: - latent_codes = get_all_latents(net, data_loader, args.n_sample, is_cars=is_cars) - torch.save(latent_codes, latents_file_path) - - if not args.latents_only: - generate_inversions(args, generator, latent_codes, is_cars=is_cars) - - -def setup_data_loader(args, opts): - dataset_args = data_configs.DATASETS[opts.dataset_type] - transforms_dict = dataset_args['transforms'](opts).get_transforms() - images_path = args.images_dir if args.images_dir is not None else dataset_args['test_source_root'] - print(f"images path: {images_path}") - align_function = None - if args.align: - align_function = run_alignment - test_dataset = InferenceDataset(root=images_path, - transform=transforms_dict['transform_test'], - 
preprocess=align_function, - opts=opts) - - data_loader = DataLoader(test_dataset, - batch_size=args.batch, - shuffle=False, - num_workers=2, - drop_last=True) - - print(f'dataset length: {len(test_dataset)}') - - if args.n_sample is None: - args.n_sample = len(test_dataset) - return args, data_loader - - -def get_latents(net, x, is_cars=False): - codes = net.encoder(x) - if net.opts.start_from_latent_avg: - if codes.ndim == 2: - codes = codes + net.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :] - else: - codes = codes + net.latent_avg.repeat(codes.shape[0], 1, 1) - if codes.shape[1] == 18 and is_cars: - codes = codes[:, :16, :] - return codes - - -def get_all_latents(net, data_loader, n_images=None, is_cars=False): - all_latents = [] - i = 0 - with torch.no_grad(): - for batch in data_loader: - if n_images is not None and i > n_images: - break - x = batch - inputs = x.to(device).float() - latents = get_latents(net, inputs, is_cars) - all_latents.append(latents) - i += len(latents) - return torch.cat(all_latents) - - -def save_image(img, save_dir, idx): - result = tensor2im(img) - im_save_path = os.path.join(save_dir, f"{idx:05d}.jpg") - Image.fromarray(np.array(result)).save(im_save_path) - - -@torch.no_grad() -def generate_inversions(args, g, latent_codes, is_cars): - print('Saving inversion images') - inversions_directory_path = os.path.join(args.save_dir, 'inversions') - os.makedirs(inversions_directory_path, exist_ok=True) - for i in range(args.n_sample): - imgs, _ = g([latent_codes[i].unsqueeze(0)], input_is_latent=True, randomize_noise=False, return_latents=True) - if is_cars: - imgs = imgs[:, :, 64:448, :] - save_image(imgs[0], inversions_directory_path, i + 1) - - -def run_alignment(image_path): - predictor = dlib.shape_predictor(paths_config.model_paths['shape_predictor']) - aligned_image = align_face(filepath=image_path, predictor=predictor) - print("Aligned image has shape: {}".format(aligned_image.size)) - return aligned_image - - -if __name__ == "__main__": - device = "cuda" - - parser = argparse.ArgumentParser(description="Inference") - parser.add_argument("--images_dir", type=str, default=None, - help="The directory of the images to be inverted") - parser.add_argument("--save_dir", type=str, default=None, - help="The directory to save the latent codes and inversion images. 
(default: images_dir") - parser.add_argument("--batch", type=int, default=1, help="batch size for the generator") - parser.add_argument("--n_sample", type=int, default=None, help="number of the samples to infer.") - parser.add_argument("--latents_only", action="store_true", help="infer only the latent codes of the directory") - parser.add_argument("--align", action="store_true", help="align face images before inference") - parser.add_argument("ckpt", metavar="CHECKPOINT", help="path to generator checkpoint") - - args = parser.parse_args() - main(args) diff --git a/spaces/ali-ghamdan/realesrgan-models/README.md b/spaces/ali-ghamdan/realesrgan-models/README.md deleted file mode 100644 index 4e28aca998b6ebf0d4957d506c903cca339a85c2..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/realesrgan-models/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: realesrgan-tests -emoji: 🏃 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.1.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/allen-eric/radiology-gpt/app.py b/spaces/allen-eric/radiology-gpt/app.py deleted file mode 100644 index 98c4c974f0737e8e3aeaca6570f2a4390edd2cf0..0000000000000000000000000000000000000000 --- a/spaces/allen-eric/radiology-gpt/app.py +++ /dev/null @@ -1,165 +0,0 @@ -import torch -from peft import PeftModel -import transformers -import gradio as gr - -assert ( - "LlamaTokenizer" in transformers._import_structure["models.llama"] -), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git" -from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig - -tokenizer = LlamaTokenizer.from_pretrained("chavinlo/alpaca-native") - -BASE_MODEL = "chavinlo/alpaca-native" -LORA_WEIGHTS = "allen-eric/lora-alpaca-7b-radiology-full" - -if torch.cuda.is_available(): - device = "cuda" -else: - device = "cpu" - -try: - if torch.backends.mps.is_available(): - device = "mps" -except: - pass - -if device == "cuda": - model = LlamaForCausalLM.from_pretrained( - BASE_MODEL, - load_in_8bit=False, - torch_dtype=torch.float16, - device_map="auto", - ) - model = PeftModel.from_pretrained( - model, LORA_WEIGHTS, torch_dtype=torch.float16, force_download=True - ) -elif device == "mps": - model = LlamaForCausalLM.from_pretrained( - BASE_MODEL, - device_map={"": device}, - torch_dtype=torch.float16, - ) - model = PeftModel.from_pretrained( - model, - LORA_WEIGHTS, - device_map={"": device}, - torch_dtype=torch.float16, - ) -else: - model = LlamaForCausalLM.from_pretrained( - BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True - ) - model = PeftModel.from_pretrained( - model, - LORA_WEIGHTS, - device_map={"": device}, - ) - - -def generate_prompt(instruction, input=None): - if input: - return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. - -### Instruction: -{instruction} - -### Input: -{input} - -### Response:""" - else: - return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. 
- -### Instruction: -{instruction} - -### Response:""" - -if device != "cpu": - model.half() -model.eval() -if torch.__version__ >= "2": - model = torch.compile(model) - - -def evaluate( - instruction, - input=None, - temperature=0.1, - top_p=0.75, - top_k=40, - num_beams=4, - max_new_tokens=128, - **kwargs, -): - prompt = generate_prompt(instruction, input) - inputs = tokenizer(prompt, return_tensors="pt") - input_ids = inputs["input_ids"].to(device) - generation_config = GenerationConfig( - temperature=temperature, - top_p=top_p, - top_k=top_k, - num_beams=num_beams, - **kwargs, - ) - with torch.no_grad(): - generation_output = model.generate( - input_ids=input_ids, - generation_config=generation_config, - return_dict_in_generate=True, - output_scores=True, - max_new_tokens=max_new_tokens, - ) - s = generation_output.sequences[0] - output = tokenizer.decode(s) - return output.split("### Response:")[1].strip() - - -g = gr.Interface( - fn=evaluate, - inputs=[ - gr.components.Textbox( - lines=2, label="Instruction", placeholder="Tell me about alpacas." - ), - gr.components.Textbox(lines=2, label="Input", placeholder="none"), - gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"), - gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"), - gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"), - gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"), - gr.components.Slider( - minimum=1, maximum=512, step=1, value=128, label="Max tokens" - ), - ], - outputs=[ - gr.inputs.Textbox( - lines=5, - label="Output", - ) - ], - title="Radiology-GPT", - description="", -) -g.queue(concurrency_count=1) -g.launch() - -# Old testing code follows. - -""" -if __name__ == "__main__": - # testing code for readme - for instruction in [ - "Tell me about alpacas.", - "Tell me about the president of Mexico in 2019.", - "Tell me about the king of France in 2019.", - "List all Canadian provinces in alphabetical order.", - "Write a Python program that prints the first 10 Fibonacci numbers.", - "Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. 
For numbers which are multiples of both three and five print 'FizzBuzz'.", - "Tell me five words that rhyme with 'shock'.", - "Translate the sentence 'I have no mouth but I must scream' into Spanish.", - "Count up from 1 to 500.", - ]: - print("Instruction:", instruction) - print("Response:", evaluate(instruction)) - print() -""" diff --git a/spaces/allknowingroger/New-Image-Models-Testing/README.md b/spaces/allknowingroger/New-Image-Models-Testing/README.md deleted file mode 100644 index 4e2caf10db78863e7a5f0de0280f30aef294b675..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/New-Image-Models-Testing/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: New Image Models -emoji: 👁 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test ---- - - \ No newline at end of file diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/qa/paqa_errs.c b/spaces/amarchheda/ChordDuplicate/portaudio/qa/paqa_errs.c deleted file mode 100644 index 8d4094f94c3ae3700af5e4e678bc98ae987ab30d..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/qa/paqa_errs.c +++ /dev/null @@ -1,403 +0,0 @@ -/** @file paqa_errs.c - @ingroup qa_src - @brief Self Testing Quality Assurance app for PortAudio - Do lots of bad things to test error reporting. - @author Phil Burk http://www.softsynth.com - Pieter Suurmond adapted to V19 API. -*/ -/* - * $Id$ - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. 
- */ - -#include -#include - -#include "portaudio.h" - -/*--------- Definitions ---------*/ -#define MODE_INPUT (0) -#define MODE_OUTPUT (1) -#define FRAMES_PER_BUFFER (64) -#define SAMPLE_RATE (44100.0) - -typedef struct PaQaData -{ - unsigned long framesLeft; - int numChannels; - int bytesPerSample; - int mode; -} -PaQaData; - -static int gNumPassed = 0; /* Two globals */ -static int gNumFailed = 0; - -/*------------------- Macros ------------------------------*/ -/* Print ERROR if it fails. Tally success or failure. Odd */ -/* do-while wrapper seems to be needed for some compilers. */ - -#define EXPECT(_exp) \ - do \ - { \ - if ((_exp)) {\ - gNumPassed++; \ - } \ - else { \ - printf("\nERROR - 0x%x - %s for %s\n", result, Pa_GetErrorText(result), #_exp ); \ - gNumFailed++; \ - goto error; \ - } \ - } while(0) - -#define HOPEFOR(_exp) \ - do \ - { \ - if ((_exp)) {\ - gNumPassed++; \ - } \ - else { \ - printf("\nERROR - 0x%x - %s for %s\n", result, Pa_GetErrorText(result), #_exp ); \ - gNumFailed++; \ - } \ - } while(0) - -/*-------------------------------------------------------------------------*/ -/* This routine will be called by the PortAudio engine when audio is needed. - It may be called at interrupt level on some machines so don't do anything - that could mess up the system like calling malloc() or free(). -*/ -static int QaCallback( const void* inputBuffer, - void* outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void* userData ) -{ - unsigned long i; - unsigned char* out = (unsigned char *) outputBuffer; - PaQaData* data = (PaQaData *) userData; - - (void)inputBuffer; /* Prevent "unused variable" warnings. */ - - /* Zero out buffer so we don't hear terrible noise. */ - if( data->mode == MODE_OUTPUT ) - { - unsigned long numBytes = framesPerBuffer * data->numChannels * data->bytesPerSample; - for( i=0; iframesLeft > framesPerBuffer ) - { - data->framesLeft -= framesPerBuffer; - return 0; - } - else - { - data->framesLeft = 0; - return 1; - } -} - -static PaDeviceIndex FindInputOnlyDevice(void) -{ - PaDeviceIndex result = Pa_GetDefaultInputDevice(); - if( result != paNoDevice && Pa_GetDeviceInfo(result)->maxOutputChannels == 0 ) - return result; - - for( result = 0; result < Pa_GetDeviceCount(); ++result ) - { - if( Pa_GetDeviceInfo(result)->maxOutputChannels == 0 ) - return result; - } - - return paNoDevice; -} - -static PaDeviceIndex FindOutputOnlyDevice(void) -{ - PaDeviceIndex result = Pa_GetDefaultOutputDevice(); - if( result != paNoDevice && Pa_GetDeviceInfo(result)->maxInputChannels == 0 ) - return result; - - for( result = 0; result < Pa_GetDeviceCount(); ++result ) - { - if( Pa_GetDeviceInfo(result)->maxInputChannels == 0 ) - return result; - } - - return paNoDevice; -} - -/*-------------------------------------------------------------------------------------------------*/ -static int TestBadOpens( void ) -{ - PaStream* stream = NULL; - PaError result; - PaQaData myData; - PaStreamParameters ipp, opp; - const PaDeviceInfo* info = NULL; - - - /* Setup data for synthesis thread. */ - myData.framesLeft = (unsigned long) (SAMPLE_RATE * 100); /* 100 seconds */ - myData.numChannels = 1; - myData.mode = MODE_OUTPUT; - - /*----------------------------- No devices specified: */ - ipp.device = opp.device = paNoDevice; - ipp.channelCount = opp.channelCount = 0; /* Also no channels. 
*/ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - /* Take the low latency of the default device for all subsequent tests. */ - info = Pa_GetDeviceInfo(Pa_GetDefaultInputDevice()); - ipp.suggestedLatency = info ? info->defaultLowInputLatency : 0.100; - info = Pa_GetDeviceInfo(Pa_GetDefaultOutputDevice()); - opp.suggestedLatency = info ? info->defaultLowOutputLatency : 0.100; - HOPEFOR(((result = Pa_OpenStream(&stream, &ipp, &opp, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paInvalidDevice)); - - /*----------------------------- No devices specified #2: */ - HOPEFOR(((result = Pa_OpenStream(&stream, NULL, NULL, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paInvalidDevice)); - - /*----------------------------- Out of range input device specified: */ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 0; ipp.device = Pa_GetDeviceCount(); /* And no output device, and no channels. */ - opp.channelCount = 0; opp.device = paNoDevice; - HOPEFOR(((result = Pa_OpenStream(&stream, &ipp, NULL, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paInvalidDevice)); - - /*----------------------------- Out of range output device specified: */ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 0; ipp.device = paNoDevice; /* And no input device, and no channels. */ - opp.channelCount = 0; opp.device = Pa_GetDeviceCount(); - HOPEFOR(((result = Pa_OpenStream(&stream, NULL, &opp, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paInvalidDevice)); - - if (Pa_GetDefaultInputDevice() != paNoDevice) { - /*----------------------------- Zero input channels: */ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 0; ipp.device = Pa_GetDefaultInputDevice(); - opp.channelCount = 0; opp.device = paNoDevice; /* And no output device, and no output channels. */ - HOPEFOR(((result = Pa_OpenStream(&stream, &ipp, NULL, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paInvalidChannelCount)); - } - - if (Pa_GetDefaultOutputDevice() != paNoDevice) { - /*----------------------------- Zero output channels: */ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 0; ipp.device = paNoDevice; /* And no input device, and no input channels. */ - opp.channelCount = 0; opp.device = Pa_GetDefaultOutputDevice(); - HOPEFOR(((result = Pa_OpenStream(&stream, NULL, &opp, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paInvalidChannelCount)); - } - /*----------------------------- Nonzero input and output channels but no output device: */ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 2; ipp.device = Pa_GetDefaultInputDevice(); /* Both stereo. 
*/ - opp.channelCount = 2; opp.device = paNoDevice; - HOPEFOR(((result = Pa_OpenStream(&stream, &ipp, &opp, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paInvalidDevice)); - - /*----------------------------- Nonzero input and output channels but no input device: */ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 2; ipp.device = paNoDevice; - opp.channelCount = 2; opp.device = Pa_GetDefaultOutputDevice(); - HOPEFOR(((result = Pa_OpenStream(&stream, &ipp, &opp, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paInvalidDevice)); - - if (Pa_GetDefaultOutputDevice() != paNoDevice) { - /*----------------------------- NULL stream pointer: */ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 0; ipp.device = paNoDevice; /* Output is more likely than input. */ - opp.channelCount = 2; opp.device = Pa_GetDefaultOutputDevice(); /* Only 2 output channels. */ - HOPEFOR(((result = Pa_OpenStream(NULL, &ipp, &opp, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paBadStreamPtr)); - - /*----------------------------- Low sample rate: */ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 0; ipp.device = paNoDevice; - opp.channelCount = 2; opp.device = Pa_GetDefaultOutputDevice(); - HOPEFOR(((result = Pa_OpenStream(&stream, NULL, &opp, - 1.0, FRAMES_PER_BUFFER, /* 1 cycle per second (1 Hz) is too low. */ - paClipOff, QaCallback, &myData )) == paInvalidSampleRate)); - - /*----------------------------- High sample rate: */ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 0; ipp.device = paNoDevice; - opp.channelCount = 2; opp.device = Pa_GetDefaultOutputDevice(); - HOPEFOR(((result = Pa_OpenStream(&stream, NULL, &opp, - 10000000.0, FRAMES_PER_BUFFER, /* 10^6 cycles per second (10 MHz) is too high. */ - paClipOff, QaCallback, &myData )) == paInvalidSampleRate)); - - /*----------------------------- NULL callback: */ - /* NULL callback is valid in V19 -- it means use blocking read/write stream - - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 0; ipp.device = paNoDevice; - opp.channelCount = 2; opp.device = Pa_GetDefaultOutputDevice(); - HOPEFOR(((result = Pa_OpenStream(&stream, NULL, &opp, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, - NULL, - &myData )) == paNullCallback)); - */ - - /*----------------------------- Bad flag: */ - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 0; ipp.device = paNoDevice; - opp.channelCount = 2; opp.device = Pa_GetDefaultOutputDevice(); - HOPEFOR(((result = Pa_OpenStream(&stream, NULL, &opp, - SAMPLE_RATE, FRAMES_PER_BUFFER, - 255, /* Is 8 maybe legal V19 API? */ - QaCallback, &myData )) == paInvalidFlag)); - } - - /*----------------------------- using input device as output device: */ - if( FindInputOnlyDevice() != paNoDevice ) - { - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 0; ipp.device = paNoDevice; /* And no input device, and no channels. 
*/ - opp.channelCount = 2; opp.device = FindInputOnlyDevice(); - HOPEFOR(((result = Pa_OpenStream(&stream, NULL, &opp, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paInvalidChannelCount)); - } - - /*----------------------------- using output device as input device: */ - if( FindOutputOnlyDevice() != paNoDevice ) - { - ipp.hostApiSpecificStreamInfo = opp.hostApiSpecificStreamInfo = NULL; - ipp.sampleFormat = opp.sampleFormat = paFloat32; - ipp.channelCount = 2; ipp.device = FindOutputOnlyDevice(); - opp.channelCount = 0; opp.device = paNoDevice; /* And no output device, and no channels. */ - HOPEFOR(((result = Pa_OpenStream(&stream, &ipp, NULL, - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paInvalidChannelCount)); - - } - - if( stream != NULL ) Pa_CloseStream( stream ); - return result; -} - -/*-----------------------------------------------------------------------------------------*/ -static int TestBadActions( void ) -{ - PaStream* stream = NULL; - const PaDeviceInfo* deviceInfo = NULL; - PaError result = 0; - PaQaData myData; - PaStreamParameters opp; - const PaDeviceInfo* info = NULL; - - /* Setup data for synthesis thread. */ - myData.framesLeft = (unsigned long)(SAMPLE_RATE * 100); /* 100 seconds */ - myData.numChannels = 1; - myData.mode = MODE_OUTPUT; - - opp.device = Pa_GetDefaultOutputDevice(); /* Default output. */ - opp.channelCount = 2; /* Stereo output. */ - opp.hostApiSpecificStreamInfo = NULL; - opp.sampleFormat = paFloat32; - info = Pa_GetDeviceInfo(opp.device); - opp.suggestedLatency = info ? info->defaultLowOutputLatency : 0.100; - - if (opp.device != paNoDevice) { - HOPEFOR(((result = Pa_OpenStream(&stream, NULL, /* Take NULL as input parame- */ - &opp, /* ters, meaning try only output. 
*/ - SAMPLE_RATE, FRAMES_PER_BUFFER, - paClipOff, QaCallback, &myData )) == paNoError)); - } - - HOPEFOR(((deviceInfo = Pa_GetDeviceInfo(paNoDevice)) == NULL)); - HOPEFOR(((deviceInfo = Pa_GetDeviceInfo(87654)) == NULL)); - HOPEFOR(((result = Pa_StartStream(NULL)) == paBadStreamPtr)); - HOPEFOR(((result = Pa_StopStream(NULL)) == paBadStreamPtr)); - HOPEFOR(((result = Pa_IsStreamStopped(NULL)) == paBadStreamPtr)); - HOPEFOR(((result = Pa_IsStreamActive(NULL)) == paBadStreamPtr)); - HOPEFOR(((result = Pa_CloseStream(NULL)) == paBadStreamPtr)); - HOPEFOR(((result = Pa_SetStreamFinishedCallback(NULL, NULL)) == paBadStreamPtr)); - HOPEFOR(((result = !Pa_GetStreamInfo(NULL)))); - HOPEFOR(((result = Pa_GetStreamTime(NULL)) == 0.0)); - HOPEFOR(((result = Pa_GetStreamCpuLoad(NULL)) == 0.0)); - HOPEFOR(((result = Pa_ReadStream(NULL, NULL, 0)) == paBadStreamPtr)); - HOPEFOR(((result = Pa_WriteStream(NULL, NULL, 0)) == paBadStreamPtr)); - - /** @todo test Pa_GetStreamReadAvailable and Pa_GetStreamWriteAvailable */ - - if (stream != NULL) Pa_CloseStream(stream); - return result; -} - -/*---------------------------------------------------------------------*/ -int main(void); -int main(void) -{ - PaError result; - - EXPECT(((result = Pa_Initialize()) == paNoError)); - TestBadOpens(); - TestBadActions(); -error: - Pa_Terminate(); - printf("QA Report: %d passed, %d failed.\n", gNumPassed, gNumFailed); - return 0; -} diff --git a/spaces/amsterdamNLP/attention-rollout/description.md b/spaces/amsterdamNLP/attention-rollout/description.md deleted file mode 100644 index 41d2366c4d3b9e54ee9d69b239f6b27d3ed0d1f3..0000000000000000000000000000000000000000 --- a/spaces/amsterdamNLP/attention-rollout/description.md +++ /dev/null @@ -1,10 +0,0 @@ -# Attention Rollout -- RoBERTa - -In this demo, we use the RoBERTa language model (optimized for masked language modelling and finetuned -for sentiment analysis). The model predicts for a given sentences whether it expresses a positive, -negative or neutral sentiment. But how does it arrive at its classification? This is, surprisingly -perhaps, very difficult to determine. - -Abnar & Zuidema (2020) proposed a method for Transformers called **Attention Rollout**, which was further -refined by Chefer et al. (2021) into **Gradient-weighted Attention Rollout**. Here we compare them to -another popular method called **Integrated Gradients**. diff --git a/spaces/arch-123/bingo/src/pages/api/healthz.ts b/spaces/arch-123/bingo/src/pages/api/healthz.ts deleted file mode 100644 index f6ae44ff0fd66ccd3f7feaa550025fbf2a83bf77..0000000000000000000000000000000000000000 --- a/spaces/arch-123/bingo/src/pages/api/healthz.ts +++ /dev/null @@ -1,7 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - res.status(200).end('ok') -} diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Math/_IntegerCustom.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Math/_IntegerCustom.py deleted file mode 100644 index d6f6f751a848ed2b6285f3aeaa1313f7c82aa64b..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Math/_IntegerCustom.py +++ /dev/null @@ -1,118 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2018, Helder Eijs -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -from ._IntegerNative import IntegerNative - -from Crypto.Util.number import long_to_bytes, bytes_to_long - -from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, - create_string_buffer, - get_raw_buffer, backend, - c_size_t, c_ulonglong) - - -from Crypto.Random.random import getrandbits - -c_defs = """ -int monty_pow(const uint8_t *base, - const uint8_t *exp, - const uint8_t *modulus, - uint8_t *out, - size_t len, - uint64_t seed); -""" - - -_raw_montgomery = load_pycryptodome_raw_lib("Crypto.Math._modexp", c_defs) -implementation = {"library": "custom", "api": backend} - - -class IntegerCustom(IntegerNative): - - @staticmethod - def from_bytes(byte_string, byteorder='big'): - if byteorder == 'big': - pass - elif byteorder == 'little': - byte_string = bytearray(byte_string) - byte_string.reverse() - else: - raise ValueError("Incorrect byteorder") - return IntegerCustom(bytes_to_long(byte_string)) - - def inplace_pow(self, exponent, modulus=None): - exp_value = int(exponent) - if exp_value < 0: - raise ValueError("Exponent must not be negative") - - # No modular reduction - if modulus is None: - self._value = pow(self._value, exp_value) - return self - - # With modular reduction - mod_value = int(modulus) - if mod_value < 0: - raise ValueError("Modulus must be positive") - if mod_value == 0: - raise ZeroDivisionError("Modulus cannot be zero") - - # C extension only works with odd moduli - if (mod_value & 1) == 0: - self._value = pow(self._value, exp_value, mod_value) - return self - - # C extension only works with bases smaller than modulus - if self._value >= mod_value: - self._value %= mod_value - - max_len = len(long_to_bytes(max(self._value, exp_value, mod_value))) - - base_b = long_to_bytes(self._value, max_len) - exp_b = long_to_bytes(exp_value, max_len) - modulus_b = long_to_bytes(mod_value, max_len) - - out = create_string_buffer(max_len) - - error = _raw_montgomery.monty_pow( - out, - base_b, - exp_b, - modulus_b, - c_size_t(max_len), - c_ulonglong(getrandbits(64)) - ) - - if error: - raise ValueError("monty_pow failed with error: %d" % error) - - result = bytes_to_long(get_raw_buffer(out)) - self._value = result - return 
self diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/PublicKey/test_import_RSA.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/PublicKey/test_import_RSA.py deleted file mode 100644 index fa92fb0aa4843006a8f2f601acd0c9960fd1a847..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/PublicKey/test_import_RSA.py +++ /dev/null @@ -1,590 +0,0 @@ -# -*- coding: utf-8 -*- -# -# SelfTest/PublicKey/test_importKey.py: Self-test for importing RSA keys -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# =================================================================== - -import os -import re -import errno -import warnings -import unittest - -from Crypto.PublicKey import RSA -from Crypto.SelfTest.st_common import a2b_hex, list_test_cases -from Crypto.Util.py3compat import b, tostr, FileNotFoundError -from Crypto.Util.number import inverse -from Crypto.Util import asn1 - -try: - import pycryptodome_test_vectors # type: ignore - test_vectors_available = True -except ImportError: - test_vectors_available = False - - -def load_file(file_name, mode="rb"): - results = None - - try: - if not test_vectors_available: - raise FileNotFoundError(errno.ENOENT, - os.strerror(errno.ENOENT), - file_name) - - dir_comps = ("PublicKey", "RSA") - init_dir = os.path.dirname(pycryptodome_test_vectors.__file__) - full_file_name = os.path.join(os.path.join(init_dir, *dir_comps), file_name) - with open(full_file_name, mode) as file_in: - results = file_in.read() - - except FileNotFoundError: - warnings.warn("Warning: skipping extended tests for RSA", - UserWarning, - stacklevel=2) - - return results - - -def der2pem(der, text='PUBLIC'): - import binascii - chunks = [binascii.b2a_base64(der[i:i+48]) for i in range(0, len(der), 48)] - pem = b('-----BEGIN %s KEY-----\n' % text) - pem += b('').join(chunks) - pem += b('-----END %s KEY-----' % text) - return pem - - -class ImportKeyTests(unittest.TestCase): - # 512-bit RSA key generated with openssl - rsaKeyPEM = u'''-----BEGIN RSA PRIVATE KEY----- -MIIBOwIBAAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+TLr7UkvEtFrRhDDKMtuII -q19FrL4pUIMymPMSLBn3hJLe30Dw48GQM4UCAwEAAQJACUSDEp8RTe32ftq8IwG8 -Wojl5mAd1wFiIOrZ/Uv8b963WJOJiuQcVN29vxU5+My9GPZ7RA3hrDBEAoHUDPrI -OQIhAPIPLz4dphiD9imAkivY31Rc5AfHJiQRA7XixTcjEkojAiEAyh/pJHks/Mlr -+rdPNEpotBjfV4M4BkgGAA/ipcmaAjcCIQCHvhwwKVBLzzTscT2HeUdEeBMoiXXK -JACAr3sJQJGxIQIgarRp+m1WSKV1MciwMaTOnbU7wxFs9DP1pva76lYBzgUCIQC9 -n0CnZCJ6IZYqSt0H5N7+Q+2Ro64nuwV/OSQfM6sBwQ== ------END RSA PRIVATE KEY-----''' - - # As above, but this is actually an unencrypted PKCS#8 key - rsaKeyPEM8 = u'''-----BEGIN 
PRIVATE KEY----- -MIIBVQIBADANBgkqhkiG9w0BAQEFAASCAT8wggE7AgEAAkEAvx4nkAqgiyNRGlwS -ga5tkzEsPv6RP5MuvtSS8S0WtGEMMoy24girX0WsvilQgzKY8xIsGfeEkt7fQPDj -wZAzhQIDAQABAkAJRIMSnxFN7fZ+2rwjAbxaiOXmYB3XAWIg6tn9S/xv3rdYk4mK -5BxU3b2/FTn4zL0Y9ntEDeGsMEQCgdQM+sg5AiEA8g8vPh2mGIP2KYCSK9jfVFzk -B8cmJBEDteLFNyMSSiMCIQDKH+kkeSz8yWv6t080Smi0GN9XgzgGSAYAD+KlyZoC -NwIhAIe+HDApUEvPNOxxPYd5R0R4EyiJdcokAICvewlAkbEhAiBqtGn6bVZIpXUx -yLAxpM6dtTvDEWz0M/Wm9rvqVgHOBQIhAL2fQKdkInohlipK3Qfk3v5D7ZGjrie7 -BX85JB8zqwHB ------END PRIVATE KEY-----''' - - # The same RSA private key as in rsaKeyPEM, but now encrypted - rsaKeyEncryptedPEM = ( - - # PEM encryption - # With DES and passphrase 'test' - ('test', u'''-----BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: DES-CBC,AF8F9A40BD2FA2FC - -Ckl9ex1kaVEWhYC2QBmfaF+YPiR4NFkRXA7nj3dcnuFEzBnY5XULupqQpQI3qbfA -u8GYS7+b3toWWiHZivHbAAUBPDIZG9hKDyB9Sq2VMARGsX1yW1zhNvZLIiVJzUHs -C6NxQ1IJWOXzTew/xM2I26kPwHIvadq+/VaT8gLQdjdH0jOiVNaevjWnLgrn1mLP -BCNRMdcexozWtAFNNqSzfW58MJL2OdMi21ED184EFytIc1BlB+FZiGZduwKGuaKy -9bMbdb/1PSvsSzPsqW7KSSrTw6MgJAFJg6lzIYvR5F4poTVBxwBX3+EyEmShiaNY -IRX3TgQI0IjrVuLmvlZKbGWP18FXj7I7k9tSsNOOzllTTdq3ny5vgM3A+ynfAaxp -dysKznQ6P+IoqML1WxAID4aGRMWka+uArOJ148Rbj9s= ------END RSA PRIVATE KEY-----'''), - - # PKCS8 encryption - ('winter', u'''-----BEGIN ENCRYPTED PRIVATE KEY----- -MIIBpjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIeZIsbW3O+JcCAggA -MBQGCCqGSIb3DQMHBAgSM2p0D8FilgSCAWBhFyP2tiGKVpGj3mO8qIBzinU60ApR -3unvP+N6j7LVgnV2lFGaXbJ6a1PbQXe+2D6DUyBLo8EMXrKKVLqOMGkFMHc0UaV6 -R6MmrsRDrbOqdpTuVRW+NVd5J9kQQh4xnfU/QrcPPt7vpJvSf4GzG0n666Ki50OV -M/feuVlIiyGXY6UWdVDpcOV72cq02eNUs/1JWdh2uEBvA9fCL0c07RnMrdT+CbJQ -NjJ7f8ULtp7xvR9O3Al/yJ4Wv3i4VxF1f3MCXzhlUD4I0ONlr0kJWgeQ80q/cWhw -ntvgJwnCn2XR1h6LA8Wp+0ghDTsL2NhJpWd78zClGhyU4r3hqu1XDjoXa7YCXCix -jCV15+ViDJzlNCwg+W6lRg18sSLkCT7alviIE0U5tHc6UPbbHwT5QqAxAABaP+nZ -CGqJGyiwBzrKebjgSm/KRd4C91XqcsysyH2kKPfT51MLAoD4xelOURBP ------END ENCRYPTED PRIVATE KEY-----''' - ), - ) - - rsaPublicKeyPEM = u'''-----BEGIN PUBLIC KEY----- -MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+T -Lr7UkvEtFrRhDDKMtuIIq19FrL4pUIMymPMSLBn3hJLe30Dw48GQM4UCAwEAAQ== ------END PUBLIC KEY-----''' - - # Obtained using 'ssh-keygen -i -m PKCS8 -f rsaPublicKeyPEM' - rsaPublicKeyOpenSSH = b('''ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQC/HieQCqCLI1EaXBKBrm2TMSw+/pE/ky6+1JLxLRa0YQwyjLbiCKtfRay+KVCDMpjzEiwZ94SS3t9A8OPBkDOF comment\n''') - - # The private key, in PKCS#1 format encoded with DER - rsaKeyDER = a2b_hex( - '''3082013b020100024100bf1e27900aa08b23511a5c1281ae6d93312c3efe - 913f932ebed492f12d16b4610c328cb6e208ab5f45acbe2950833298f312 - 2c19f78492dedf40f0e3c190338502030100010240094483129f114dedf6 - 7edabc2301bc5a88e5e6601dd7016220ead9fd4bfc6fdeb75893898ae41c - 54ddbdbf1539f8ccbd18f67b440de1ac30440281d40cfac839022100f20f - 2f3e1da61883f62980922bd8df545ce407c726241103b5e2c53723124a23 - 022100ca1fe924792cfcc96bfab74f344a68b418df578338064806000fe2 - a5c99a023702210087be1c3029504bcf34ec713d877947447813288975ca - 240080af7b094091b12102206ab469fa6d5648a57531c8b031a4ce9db53b - c3116cf433f5a6f6bbea5601ce05022100bd9f40a764227a21962a4add07 - e4defe43ed91a3ae27bb057f39241f33ab01c1 - '''.replace(" ","")) - - # The private key, in unencrypted PKCS#8 format encoded with DER - rsaKeyDER8 = a2b_hex( - '''30820155020100300d06092a864886f70d01010105000482013f3082013 - b020100024100bf1e27900aa08b23511a5c1281ae6d93312c3efe913f932 - ebed492f12d16b4610c328cb6e208ab5f45acbe2950833298f3122c19f78 - 492dedf40f0e3c190338502030100010240094483129f114dedf67edabc2 - 
301bc5a88e5e6601dd7016220ead9fd4bfc6fdeb75893898ae41c54ddbdb - f1539f8ccbd18f67b440de1ac30440281d40cfac839022100f20f2f3e1da - 61883f62980922bd8df545ce407c726241103b5e2c53723124a23022100c - a1fe924792cfcc96bfab74f344a68b418df578338064806000fe2a5c99a0 - 23702210087be1c3029504bcf34ec713d877947447813288975ca240080a - f7b094091b12102206ab469fa6d5648a57531c8b031a4ce9db53bc3116cf - 433f5a6f6bbea5601ce05022100bd9f40a764227a21962a4add07e4defe4 - 3ed91a3ae27bb057f39241f33ab01c1 - '''.replace(" ","")) - - rsaPublicKeyDER = a2b_hex( - '''305c300d06092a864886f70d0101010500034b003048024100bf1e27900a - a08b23511a5c1281ae6d93312c3efe913f932ebed492f12d16b4610c328c - b6e208ab5f45acbe2950833298f3122c19f78492dedf40f0e3c190338502 - 03010001 - '''.replace(" ","")) - - n = int('BF 1E 27 90 0A A0 8B 23 51 1A 5C 12 81 AE 6D 93 31 2C 3E FE 91 3F 93 2E BE D4 92 F1 2D 16 B4 61 0C 32 8C B6 E2 08 AB 5F 45 AC BE 29 50 83 32 98 F3 12 2C 19 F7 84 92 DE DF 40 F0 E3 C1 90 33 85'.replace(" ",""),16) - e = 65537 - d = int('09 44 83 12 9F 11 4D ED F6 7E DA BC 23 01 BC 5A 88 E5 E6 60 1D D7 01 62 20 EA D9 FD 4B FC 6F DE B7 58 93 89 8A E4 1C 54 DD BD BF 15 39 F8 CC BD 18 F6 7B 44 0D E1 AC 30 44 02 81 D4 0C FA C8 39'.replace(" ",""),16) - p = int('00 F2 0F 2F 3E 1D A6 18 83 F6 29 80 92 2B D8 DF 54 5C E4 07 C7 26 24 11 03 B5 E2 C5 37 23 12 4A 23'.replace(" ",""),16) - q = int('00 CA 1F E9 24 79 2C FC C9 6B FA B7 4F 34 4A 68 B4 18 DF 57 83 38 06 48 06 00 0F E2 A5 C9 9A 02 37'.replace(" ",""),16) - - # This is q^{-1} mod p). fastmath and slowmath use pInv (p^{-1} - # mod q) instead! - qInv = int('00 BD 9F 40 A7 64 22 7A 21 96 2A 4A DD 07 E4 DE FE 43 ED 91 A3 AE 27 BB 05 7F 39 24 1F 33 AB 01 C1'.replace(" ",""),16) - pInv = inverse(p,q) - - def testImportKey1(self): - """Verify import of RSAPrivateKey DER SEQUENCE""" - key = RSA.importKey(self.rsaKeyDER) - self.assertTrue(key.has_private()) - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - self.assertEqual(key.d, self.d) - self.assertEqual(key.p, self.p) - self.assertEqual(key.q, self.q) - - def testImportKey2(self): - """Verify import of SubjectPublicKeyInfo DER SEQUENCE""" - key = RSA.importKey(self.rsaPublicKeyDER) - self.assertFalse(key.has_private()) - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - - def testImportKey3unicode(self): - """Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as unicode""" - key = RSA.importKey(self.rsaKeyPEM) - self.assertEqual(key.has_private(),True) # assert_ - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - self.assertEqual(key.d, self.d) - self.assertEqual(key.p, self.p) - self.assertEqual(key.q, self.q) - - def testImportKey3bytes(self): - """Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as byte string""" - key = RSA.importKey(b(self.rsaKeyPEM)) - self.assertEqual(key.has_private(),True) # assert_ - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - self.assertEqual(key.d, self.d) - self.assertEqual(key.p, self.p) - self.assertEqual(key.q, self.q) - - def testImportKey4unicode(self): - """Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as unicode""" - key = RSA.importKey(self.rsaPublicKeyPEM) - self.assertEqual(key.has_private(),False) # assertFalse - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - - def testImportKey4bytes(self): - """Verify import of SubjectPublicKeyInfo DER SEQUENCE, encoded with PEM as byte string""" - key = RSA.importKey(b(self.rsaPublicKeyPEM)) - 
self.assertEqual(key.has_private(),False) # assertFalse - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - - def testImportKey5(self): - """Verifies that the imported key is still a valid RSA pair""" - key = RSA.importKey(self.rsaKeyPEM) - idem = key._encrypt(key._decrypt(89)) - self.assertEqual(idem, 89) - - def testImportKey6(self): - """Verifies that the imported key is still a valid RSA pair""" - key = RSA.importKey(self.rsaKeyDER) - idem = key._encrypt(key._decrypt(65)) - self.assertEqual(idem, 65) - - def testImportKey7(self): - """Verify import of OpenSSH public key""" - key = RSA.importKey(self.rsaPublicKeyOpenSSH) - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - - def testImportKey8(self): - """Verify import of encrypted PrivateKeyInfo DER SEQUENCE""" - for t in self.rsaKeyEncryptedPEM: - key = RSA.importKey(t[1], t[0]) - self.assertTrue(key.has_private()) - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - self.assertEqual(key.d, self.d) - self.assertEqual(key.p, self.p) - self.assertEqual(key.q, self.q) - - def testImportKey9(self): - """Verify import of unencrypted PrivateKeyInfo DER SEQUENCE""" - key = RSA.importKey(self.rsaKeyDER8) - self.assertTrue(key.has_private()) - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - self.assertEqual(key.d, self.d) - self.assertEqual(key.p, self.p) - self.assertEqual(key.q, self.q) - - def testImportKey10(self): - """Verify import of unencrypted PrivateKeyInfo DER SEQUENCE, encoded with PEM""" - key = RSA.importKey(self.rsaKeyPEM8) - self.assertTrue(key.has_private()) - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - self.assertEqual(key.d, self.d) - self.assertEqual(key.p, self.p) - self.assertEqual(key.q, self.q) - - def testImportKey11(self): - """Verify import of RSAPublicKey DER SEQUENCE""" - der = asn1.DerSequence([17, 3]).encode() - key = RSA.importKey(der) - self.assertEqual(key.n, 17) - self.assertEqual(key.e, 3) - - def testImportKey12(self): - """Verify import of RSAPublicKey DER SEQUENCE, encoded with PEM""" - der = asn1.DerSequence([17, 3]).encode() - pem = der2pem(der) - key = RSA.importKey(pem) - self.assertEqual(key.n, 17) - self.assertEqual(key.e, 3) - - def test_import_key_windows_cr_lf(self): - pem_cr_lf = "\r\n".join(self.rsaKeyPEM.splitlines()) - key = RSA.importKey(pem_cr_lf) - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - self.assertEqual(key.d, self.d) - self.assertEqual(key.p, self.p) - self.assertEqual(key.q, self.q) - - def test_import_empty(self): - self.assertRaises(ValueError, RSA.import_key, b"") - - ### - def testExportKey1(self): - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - derKey = key.export_key("DER") - self.assertEqual(derKey, self.rsaKeyDER) - - def testExportKey2(self): - key = RSA.construct([self.n, self.e]) - derKey = key.export_key("DER") - self.assertEqual(derKey, self.rsaPublicKeyDER) - - def testExportKey3(self): - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - pemKey = key.export_key("PEM") - self.assertEqual(pemKey, b(self.rsaKeyPEM)) - - def testExportKey4(self): - key = RSA.construct([self.n, self.e]) - pemKey = key.export_key("PEM") - self.assertEqual(pemKey, b(self.rsaPublicKeyPEM)) - - def testExportKey5(self): - key = RSA.construct([self.n, self.e]) - openssh_1 = key.export_key("OpenSSH").split() - openssh_2 = self.rsaPublicKeyOpenSSH.split() - self.assertEqual(openssh_1[0], openssh_2[0]) - 
self.assertEqual(openssh_1[1], openssh_2[1]) - - def testExportKey7(self): - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - derKey = key.export_key("DER", pkcs=8) - self.assertEqual(derKey, self.rsaKeyDER8) - - def testExportKey8(self): - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - pemKey = key.export_key("PEM", pkcs=8) - self.assertEqual(pemKey, b(self.rsaKeyPEM8)) - - def testExportKey9(self): - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - self.assertRaises(ValueError, key.export_key, "invalid-format") - - def testExportKey10(self): - # Export and re-import the encrypted key. It must match. - # PEM envelope, PKCS#1, old PEM encryption - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - outkey = key.export_key('PEM', 'test') - self.assertTrue(tostr(outkey).find('4,ENCRYPTED')!=-1) - self.assertTrue(tostr(outkey).find('BEGIN RSA PRIVATE KEY')!=-1) - inkey = RSA.importKey(outkey, 'test') - self.assertEqual(key.n, inkey.n) - self.assertEqual(key.e, inkey.e) - self.assertEqual(key.d, inkey.d) - - def testExportKey11(self): - # Export and re-import the encrypted key. It must match. - # PEM envelope, PKCS#1, old PEM encryption - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - outkey = key.export_key('PEM', 'test', pkcs=1) - self.assertTrue(tostr(outkey).find('4,ENCRYPTED')!=-1) - self.assertTrue(tostr(outkey).find('BEGIN RSA PRIVATE KEY')!=-1) - inkey = RSA.importKey(outkey, 'test') - self.assertEqual(key.n, inkey.n) - self.assertEqual(key.e, inkey.e) - self.assertEqual(key.d, inkey.d) - - def testExportKey12(self): - # Export and re-import the encrypted key. It must match. - # PEM envelope, PKCS#8, old PEM encryption - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - outkey = key.export_key('PEM', 'test', pkcs=8) - self.assertTrue(tostr(outkey).find('4,ENCRYPTED')!=-1) - self.assertTrue(tostr(outkey).find('BEGIN PRIVATE KEY')!=-1) - inkey = RSA.importKey(outkey, 'test') - self.assertEqual(key.n, inkey.n) - self.assertEqual(key.e, inkey.e) - self.assertEqual(key.d, inkey.d) - - def testExportKey13(self): - # Export and re-import the encrypted key. It must match. - # PEM envelope, PKCS#8, PKCS#8 encryption - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - outkey = key.export_key('PEM', 'test', pkcs=8, - protection='PBKDF2WithHMAC-SHA1AndDES-EDE3-CBC') - self.assertTrue(tostr(outkey).find('4,ENCRYPTED')==-1) - self.assertTrue(tostr(outkey).find('BEGIN ENCRYPTED PRIVATE KEY')!=-1) - inkey = RSA.importKey(outkey, 'test') - self.assertEqual(key.n, inkey.n) - self.assertEqual(key.e, inkey.e) - self.assertEqual(key.d, inkey.d) - - def testExportKey14(self): - # Export and re-import the encrypted key. It must match. - # DER envelope, PKCS#8, PKCS#8 encryption - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - outkey = key.export_key('DER', 'test', pkcs=8) - inkey = RSA.importKey(outkey, 'test') - self.assertEqual(key.n, inkey.n) - self.assertEqual(key.e, inkey.e) - self.assertEqual(key.d, inkey.d) - - def testExportKey15(self): - # Verify that that error an condition is detected when trying to - # use a password with DER encoding and PKCS#1. 
- key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - self.assertRaises(ValueError, key.export_key, 'DER', 'test', 1) - - def test_import_key(self): - """Verify that import_key is an alias to importKey""" - key = RSA.import_key(self.rsaPublicKeyDER) - self.assertFalse(key.has_private()) - self.assertEqual(key.n, self.n) - self.assertEqual(key.e, self.e) - - def test_import_key_ba_mv(self): - """Verify that import_key can be used on bytearrays and memoryviews""" - key = RSA.import_key(bytearray(self.rsaPublicKeyDER)) - key = RSA.import_key(memoryview(self.rsaPublicKeyDER)) - - def test_exportKey(self): - key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv]) - self.assertEqual(key.export_key(), key.exportKey()) - - -class ImportKeyFromX509Cert(unittest.TestCase): - - def test_x509v1(self): - - # Sample V1 certificate with a 1024 bit RSA key - x509_v1_cert = """ ------BEGIN CERTIFICATE----- -MIICOjCCAaMCAQEwDQYJKoZIhvcNAQEEBQAwfjENMAsGA1UEChMEQWNtZTELMAkG -A1UECxMCUkQxHDAaBgkqhkiG9w0BCQEWDXNwYW1AYWNtZS5vcmcxEzARBgNVBAcT -Ck1ldHJvcG9saXMxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQGEwJVUzENMAsG -A1UEAxMEdGVzdDAeFw0xNDA3MTExOTU3MjRaFw0xNzA0MDYxOTU3MjRaME0xCzAJ -BgNVBAYTAlVTMREwDwYDVQQIEwhOZXcgWW9yazENMAsGA1UEChMEQWNtZTELMAkG -A1UECxMCUkQxDzANBgNVBAMTBmxhdHZpYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAw -gYkCgYEAyG+kytdRj3TFbRmHDYp3TXugVQ81chew0qeOxZWOz80IjtWpgdOaCvKW -NCuc8wUR9BWrEQW+39SaRMLiQfQtyFSQZijc3nsEBu/Lo4uWZ0W/FHDRVSvkJA/V -Ex5NL5ikI+wbUeCV5KajGNDalZ8F1pk32+CBs8h1xNx5DyxuEHUCAwEAATANBgkq -hkiG9w0BAQQFAAOBgQCVQF9Y//Q4Psy+umEM38pIlbZ2hxC5xNz/MbVPwuCkNcGn -KYNpQJP+JyVTsPpO8RLZsAQDzRueMI3S7fbbwTzAflN0z19wvblvu93xkaBytVok -9VBAH28olVhy9b1MMeg2WOt5sUEQaFNPnwwsyiY9+HsRpvpRnPSQF+kyYVsshQ== ------END CERTIFICATE----- - """.strip() - - # RSA public key as dumped by openssl - exponent = 65537 - modulus_str = """ -00:c8:6f:a4:ca:d7:51:8f:74:c5:6d:19:87:0d:8a: -77:4d:7b:a0:55:0f:35:72:17:b0:d2:a7:8e:c5:95: -8e:cf:cd:08:8e:d5:a9:81:d3:9a:0a:f2:96:34:2b: -9c:f3:05:11:f4:15:ab:11:05:be:df:d4:9a:44:c2: -e2:41:f4:2d:c8:54:90:66:28:dc:de:7b:04:06:ef: -cb:a3:8b:96:67:45:bf:14:70:d1:55:2b:e4:24:0f: -d5:13:1e:4d:2f:98:a4:23:ec:1b:51:e0:95:e4:a6: -a3:18:d0:da:95:9f:05:d6:99:37:db:e0:81:b3:c8: -75:c4:dc:79:0f:2c:6e:10:75 - """ - modulus = int(re.sub("[^0-9a-f]","", modulus_str), 16) - - key = RSA.importKey(x509_v1_cert) - self.assertEqual(key.e, exponent) - self.assertEqual(key.n, modulus) - self.assertFalse(key.has_private()) - - def test_x509v3(self): - - # Sample V3 certificate with a 1024 bit RSA key - x509_v3_cert = """ ------BEGIN CERTIFICATE----- -MIIEcjCCAlqgAwIBAgIBATANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQGEwJVUzEL -MAkGA1UECAwCTUQxEjAQBgNVBAcMCUJhbHRpbW9yZTEQMA4GA1UEAwwHVGVzdCBD -QTEfMB0GCSqGSIb3DQEJARYQdGVzdEBleGFtcGxlLmNvbTAeFw0xNDA3MTIwOTM1 -MTJaFw0xNzA0MDcwOTM1MTJaMEQxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJNRDES -MBAGA1UEBwwJQmFsdGltb3JlMRQwEgYDVQQDDAtUZXN0IFNlcnZlcjCBnzANBgkq -hkiG9w0BAQEFAAOBjQAwgYkCgYEA/S7GJV2OcFdyNMQ4K75KrYFtMEn3VnEFdPHa -jyS37XlMxSh0oS4GeTGVUCJInl5Cpsv8WQdh03FfeOdvzp5IZ46OcjeOPiWnmjgl -2G5j7e2bDH7RSchGV+OD6Fb1Agvuu2/9iy8fdf3rPQ/7eAddzKUrzwacVbnW+tg2 -QtSXKRcCAwEAAaOB1TCB0jAdBgNVHQ4EFgQU/WwCX7FfWMIPDFfJ+I8a2COG+l8w -HwYDVR0jBBgwFoAUa0hkif3RMaraiWtsOOZZlLu9wJwwCQYDVR0TBAIwADALBgNV -HQ8EBAMCBeAwSgYDVR0RBEMwQYILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxlLmNv -bYIQbWFpbC5leGFtcGxlLmNvbYIPZnRwLmV4YW1wbGUuY29tMCwGCWCGSAGG+EIB -DQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsF -AAOCAgEAvO6xfdsGbnoK4My3eJthodTAjMjPwFVY133LH04QLcCv54TxKhtUg1fi 
-PgdjVe1HpTytPBfXy2bSZbXAN0abZCtw1rYrnn7o1g2pN8iypVq3zVn0iMTzQzxs -zEPO3bpR/UhNSf90PmCsS5rqZpAAnXSaAy1ClwHWk/0eG2pYkhE1m1ABVMN2lsAW -e9WxGk6IFqaI9O37NYQwmEypMs4DC+ECJEvbPFiqi3n0gbXCZJJ6omDA5xJldaYK -Oa7KR3s/qjBsu9UAiWpLBuFoSTHIF2aeRKRFmUdmzwo43eVPep65pY6eQ4AdL2RF -rqEuINbGlzI5oQyYhu71IwB+iPZXaZZPlwjLgOsuad/p2hOgDb5WxUi8FnDPursQ -ujfpIpmrOP/zpvvQWnwePI3lI+5n41kTBSbefXEdv6rXpHk3QRzB90uPxnXPdxSC -16ASA8bQT5an/1AgoE3k9CrcD2K0EmgaX0YI0HUhkyzbkg34EhpWJ6vvRUbRiNRo -9cIbt/ya9Y9u0Ja8GLXv6dwX0l0IdJMkL8KifXUFAVCujp1FBrr/gdmwQn8itANy -+qbnWSxmOvtaY0zcaFAcONuHva0h51/WqXOMO1eb8PhR4HIIYU8p1oBwQp7dSni8 -THDi1F+GG5PsymMDj5cWK42f+QzjVw5PrVmFqqrrEoMlx8DWh5Y= ------END CERTIFICATE----- -""".strip() - - # RSA public key as dumped by openssl - exponent = 65537 - modulus_str = """ -00:fd:2e:c6:25:5d:8e:70:57:72:34:c4:38:2b:be: -4a:ad:81:6d:30:49:f7:56:71:05:74:f1:da:8f:24: -b7:ed:79:4c:c5:28:74:a1:2e:06:79:31:95:50:22: -48:9e:5e:42:a6:cb:fc:59:07:61:d3:71:5f:78:e7: -6f:ce:9e:48:67:8e:8e:72:37:8e:3e:25:a7:9a:38: -25:d8:6e:63:ed:ed:9b:0c:7e:d1:49:c8:46:57:e3: -83:e8:56:f5:02:0b:ee:bb:6f:fd:8b:2f:1f:75:fd: -eb:3d:0f:fb:78:07:5d:cc:a5:2b:cf:06:9c:55:b9: -d6:fa:d8:36:42:d4:97:29:17 - """ - modulus = int(re.sub("[^0-9a-f]","", modulus_str), 16) - - key = RSA.importKey(x509_v3_cert) - self.assertEqual(key.e, exponent) - self.assertEqual(key.n, modulus) - self.assertFalse(key.has_private()) - - -class TestImport_2048(unittest.TestCase): - - def test_import_openssh_public(self): - key_file_ref = load_file("rsa2048_private.pem") - key_file = load_file("rsa2048_public_openssh.txt") - - # Skip test if test vectors are not installed - if None in (key_file_ref, key_file): - return - - key_ref = RSA.import_key(key_file_ref).public_key() - key = RSA.import_key(key_file) - self.assertEqual(key_ref, key) - - def test_import_openssh_private_clear(self): - key_file = load_file("rsa2048_private_openssh.pem") - key_file_old = load_file("rsa2048_private_openssh_old.pem") - - # Skip test if test vectors are not installed - if None in (key_file_old, key_file): - return - - key = RSA.import_key(key_file) - key_old = RSA.import_key(key_file_old) - - self.assertEqual(key, key_old) - - def test_import_openssh_private_password(self): - key_file = load_file("rsa2048_private_openssh_pwd.pem") - key_file_old = load_file("rsa2048_private_openssh_pwd_old.pem") - - # Skip test if test vectors are not installed - if None in (key_file_old, key_file): - return - - key = RSA.import_key(key_file, b"password") - key_old = RSA.import_key(key_file_old) - self.assertEqual(key, key_old) - - -if __name__ == '__main__': - unittest.main() - - -def get_tests(config={}): - tests = [] - tests += list_test_cases(ImportKeyTests) - tests += list_test_cases(ImportKeyFromX509Cert) - tests += list_test_cases(TestImport_2048) - return tests - - -if __name__ == '__main__': - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') - -# vim:set ts=4 sw=4 sts=4 expandtab: diff --git a/spaces/ashercn97/AsherTesting/css/html_readable_style.css b/spaces/ashercn97/AsherTesting/css/html_readable_style.css deleted file mode 100644 index cd5fca97868167718d239b4be72e9271971807e2..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/css/html_readable_style.css +++ /dev/null @@ -1,29 +0,0 @@ -.container { - max-width: 600px; - margin-left: auto; - margin-right: auto; - background-color: rgb(31, 41, 55); - padding: 3em; - word-break: break-word; - overflow-wrap: anywhere; - color: #efefef !important; -} - -.container p, .container 
li { - font-size: 16px !important; - color: #efefef !important; - margin-bottom: 22px; - line-height: 1.4 !important; -} - -.container li > p { - display: inline !important; -} - -.container code { - overflow-x: auto; -} - -.container :not(pre) > code { - white-space: normal !important; -} \ No newline at end of file diff --git a/spaces/ashercn97/AsherTesting/loras/ashercn97_code-llama-slay/README.md b/spaces/ashercn97/AsherTesting/loras/ashercn97_code-llama-slay/README.md deleted file mode 100644 index badebfa87bae5dbac1a6a3ed5dea5a21641e974a..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/loras/ashercn97_code-llama-slay/README.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -pipeline_tag: text-generation -library_name: peft ---- \ No newline at end of file diff --git a/spaces/auto-academic/auto-draft/models/embeddings.py b/spaces/auto-academic/auto-draft/models/embeddings.py deleted file mode 100644 index 308a3a7882eedb2c0406782276b3e363daac08e5..0000000000000000000000000000000000000000 --- a/spaces/auto-academic/auto-draft/models/embeddings.py +++ /dev/null @@ -1,21 +0,0 @@ -from langchain.embeddings import HuggingFaceEmbeddings -import os - -openai_api_key = os.getenv("OPENAI_API_KEY") -if openai_api_key is not None: - from langchain.embeddings.openai import OpenAIEmbeddings - openai_embedding = OpenAIEmbeddings(model="text-embedding-ada-002", openai_api_key=openai_api_key) -else: - openai_embedding = None - -model_name = 'sentence-transformers/all-MiniLM-L6-v2' -model_kwargs = {'device': 'cpu'} -encode_kwargs = {'normalize_embeddings': False} - -all_minilm_l6_v2 = HuggingFaceEmbeddings( - model_name=model_name, - model_kwargs=model_kwargs, - encode_kwargs=encode_kwargs) - - -EMBEDDINGS = {"text-embedding-ada-002": openai_embedding, "all-MiniLM-L6-v2": all_minilm_l6_v2} \ No newline at end of file diff --git a/spaces/avivdm1/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md b/spaces/avivdm1/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index a4f28a3d27d66d79cb95f2b8b847832172bb5f11..0000000000000000000000000000000000000000 --- a/spaces/avivdm1/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,40 +0,0 @@ - - - - -### Background - - -### Changes - - -### Documentation - - -### Test Plan - - -### PR Quality Checklist -- [ ] My pull request is atomic and focuses on a single change. -- [ ] I have thoroughly tested my changes with multiple different prompts. -- [ ] I have considered potential risks and mitigations for my changes. -- [ ] I have documented my changes clearly and comprehensively. 
-- [ ] I have not snuck in any "extra" small tweaks changes - - - - diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/modules/runtime.py b/spaces/awaawawawa/iurf7irfuyytruyyugb/modules/runtime.py deleted file mode 100644 index 8d4bcb75ebe1b4419443669b91da7e3bfe5704c2..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/modules/runtime.py +++ /dev/null @@ -1,682 +0,0 @@ -import json -import os, re -import traceback -import torch -import numpy as np -from omegaconf import OmegaConf -from PIL import Image, ImageOps -from tqdm import tqdm, trange -from itertools import islice -from einops import rearrange -import time -from pytorch_lightning import seed_everything -from torch import autocast -from contextlib import nullcontext -from einops import rearrange, repeat -from ldmlib.util import instantiate_from_config -from optimizedSD.optimUtils import split_weighted_subprompts -from transformers import logging - -from gfpgan import GFPGANer -from basicsr.archs.rrdbnet_arch import RRDBNet -from realesrgan import RealESRGANer - -import uuid - -logging.set_verbosity_error() - -# consts -config_yaml = "optimizedSD/v1-inference.yaml" -filename_regex = re.compile('[^a-zA-Z0-9]') - -# api stuff -from sd_internal import Request, Response, Image as ResponseImage -import base64 -from io import BytesIO -#from colorama import Fore - -# local -stop_processing = False -temp_images = {} - -ckpt_file = None -gfpgan_file = None -real_esrgan_file = None - -model = None -modelCS = None -modelFS = None -model_gfpgan = None -model_real_esrgan = None - -model_is_half = False -model_fs_is_half = False -device = None -unet_bs = 1 -precision = 'autocast' -sampler_plms = None -sampler_ddim = None - -has_valid_gpu = False -force_full_precision = False -try: - gpu = torch.cuda.current_device() - gpu_name = torch.cuda.get_device_name(gpu) - print('GPU detected: ', gpu_name) - - force_full_precision = ('nvidia' in gpu_name.lower() or 'geforce' in gpu_name.lower()) and (' 1660' in gpu_name or ' 1650' in gpu_name) # otherwise these NVIDIA cards create green images - if force_full_precision: - print('forcing full precision on NVIDIA 16xx cards, to avoid green images. GPU detected: ', gpu_name) - - mem_free, mem_total = torch.cuda.mem_get_info(gpu) - mem_total /= float(10**9) - if mem_total < 3.0: - print("GPUs with less than 3 GB of VRAM are not compatible with Stable Diffusion") - raise Exception() - - has_valid_gpu = True -except: - print('WARNING: No compatible GPU found. Using the CPU, but this will be very slow!') - pass - -def load_model_ckpt(ckpt_to_use, device_to_use='cuda', turbo=False, unet_bs_to_use=1, precision_to_use='autocast'): - global ckpt_file, model, modelCS, modelFS, model_is_half, device, unet_bs, precision, model_fs_is_half - - device = device_to_use if has_valid_gpu else 'cpu' - precision = precision_to_use if not force_full_precision else 'full' - unet_bs = unet_bs_to_use - - unload_model() - - if device == 'cpu': - precision = 'full' - - sd = load_model_from_config(f"{ckpt_to_use}.ckpt") - li, lo = [], [] - for key, value in sd.items(): - sp = key.split(".") - if (sp[0]) == "model": - if "input_blocks" in sp: - li.append(key) - elif "middle_block" in sp: - li.append(key) - elif "time_embed" in sp: - li.append(key) - else: - lo.append(key) - for key in li: - sd["model1." + key[6:]] = sd.pop(key) - for key in lo: - sd["model2." 
+ key[6:]] = sd.pop(key) - - config = OmegaConf.load(f"{config_yaml}") - - model = instantiate_from_config(config.modelUNet) - _, _ = model.load_state_dict(sd, strict=False) - model.eval() - model.cdevice = device - model.unet_bs = unet_bs - model.turbo = turbo - - modelCS = instantiate_from_config(config.modelCondStage) - _, _ = modelCS.load_state_dict(sd, strict=False) - modelCS.eval() - modelCS.cond_stage_model.device = device - - modelFS = instantiate_from_config(config.modelFirstStage) - _, _ = modelFS.load_state_dict(sd, strict=False) - modelFS.eval() - del sd - - if device != "cpu" and precision == "autocast": - model.half() - modelCS.half() - modelFS.half() - model_is_half = True - model_fs_is_half = True - else: - model_is_half = False - model_fs_is_half = False - - ckpt_file = ckpt_to_use - - print('loaded ', ckpt_file, 'to', device, 'precision', precision) - -def unload_model(): - global model, modelCS, modelFS - - if model is not None: - del model - del modelCS - del modelFS - - model = None - modelCS = None - modelFS = None - -def load_model_gfpgan(gfpgan_to_use): - global gfpgan_file, model_gfpgan - - if gfpgan_to_use is None: - return - - gfpgan_file = gfpgan_to_use - model_path = gfpgan_to_use + ".pth" - - if device == 'cpu': - model_gfpgan = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device('cpu')) - else: - model_gfpgan = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device('cuda')) - - print('loaded ', gfpgan_to_use, 'to', device, 'precision', precision) - -def load_model_real_esrgan(real_esrgan_to_use): - global real_esrgan_file, model_real_esrgan - - if real_esrgan_to_use is None: - return - - real_esrgan_file = real_esrgan_to_use - model_path = real_esrgan_to_use + ".pth" - - RealESRGAN_models = { - 'RealESRGAN_x4plus': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4), - 'RealESRGAN_x4plus_anime_6B': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) - } - - model_to_use = RealESRGAN_models[real_esrgan_to_use] - - if device == 'cpu': - model_real_esrgan = RealESRGANer(scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=False) # cpu does not support half - model_real_esrgan.device = torch.device('cpu') - model_real_esrgan.model.to('cpu') - else: - model_real_esrgan = RealESRGANer(scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=model_is_half) - - model_real_esrgan.model.name = real_esrgan_to_use - - print('loaded ', real_esrgan_to_use, 'to', device, 'precision', precision) - -def mk_img(req: Request): - try: - yield from do_mk_img(req) - except Exception as e: - print(traceback.format_exc()) - - gc() - - if device != "cpu": - modelFS.to("cpu") - modelCS.to("cpu") - - model.model1.to("cpu") - model.model2.to("cpu") - - gc() - - yield json.dumps({ - "status": 'failed', - "detail": str(e) - }) - -def do_mk_img(req: Request): - global ckpt_file - global model, modelCS, modelFS, device - global model_gfpgan, model_real_esrgan - global stop_processing - - stop_processing = False - - res = Response() - res.request = req - res.images = [] - - temp_images.clear() - - # custom model support: - # the req.use_stable_diffusion_model needs to be a valid path - # to the ckpt file (without the extension). 
- - needs_model_reload = False - ckpt_to_use = ckpt_file - if ckpt_to_use != req.use_stable_diffusion_model: - ckpt_to_use = req.use_stable_diffusion_model - needs_model_reload = True - - model.turbo = req.turbo - if req.use_cpu: - if device != 'cpu': - device = 'cpu' - - if model_is_half: - load_model_ckpt(ckpt_to_use, device) - needs_model_reload = False - - load_model_gfpgan(gfpgan_file) - load_model_real_esrgan(real_esrgan_file) - else: - if has_valid_gpu: - prev_device = device - device = 'cuda' - - if (precision == 'autocast' and (req.use_full_precision or not model_is_half)) or \ - (precision == 'full' and not req.use_full_precision and not force_full_precision): - - load_model_ckpt(ckpt_to_use, device, req.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast')) - needs_model_reload = False - - if prev_device != device: - load_model_gfpgan(gfpgan_file) - load_model_real_esrgan(real_esrgan_file) - - if needs_model_reload: - load_model_ckpt(ckpt_to_use, device, req.turbo, unet_bs, precision) - - if req.use_face_correction != gfpgan_file: - load_model_gfpgan(req.use_face_correction) - - if req.use_upscale != real_esrgan_file: - load_model_real_esrgan(req.use_upscale) - - model.cdevice = device - modelCS.cond_stage_model.device = device - - opt_prompt = req.prompt - opt_seed = req.seed - opt_n_samples = req.num_outputs - opt_n_iter = 1 - opt_scale = req.guidance_scale - opt_C = 4 - opt_H = req.height - opt_W = req.width - opt_f = 8 - opt_ddim_steps = req.num_inference_steps - opt_ddim_eta = 0.0 - opt_strength = req.prompt_strength - opt_save_to_disk_path = req.save_to_disk_path - opt_init_img = req.init_image - opt_use_face_correction = req.use_face_correction - opt_use_upscale = req.use_upscale - opt_show_only_filtered = req.show_only_filtered_image - opt_format = req.output_format - opt_sampler_name = req.sampler - - print(req.to_string(), '\n device', device) - - print('\n\n Using precision:', precision) - - seed_everything(opt_seed) - - batch_size = opt_n_samples - prompt = opt_prompt - assert prompt is not None - data = [batch_size * [prompt]] - - if precision == "autocast" and device != "cpu": - precision_scope = autocast - else: - precision_scope = nullcontext - - mask = None - - if req.init_image is None: - handler = _txt2img - - init_latent = None - t_enc = None - else: - handler = _img2img - - init_image = load_img(req.init_image, opt_W, opt_H) - init_image = init_image.to(device) - - if device != "cpu" and precision == "autocast": - init_image = init_image.half() - - modelFS.to(device) - - init_image = repeat(init_image, '1 ... -> b ...', b=batch_size) - init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image)) # move to latent space - - if req.mask is not None: - mask = load_mask(req.mask, opt_W, opt_H, init_latent.shape[2], init_latent.shape[3], True).to(device) - mask = mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0) - mask = repeat(mask, '1 ... -> b ...', b=batch_size) - - if device != "cpu" and precision == "autocast": - mask = mask.half() - - move_fs_to_cpu() - - assert 0. 
<= opt_strength <= 1., 'can only work with strength in [0.0, 1.0]' - t_enc = int(opt_strength * opt_ddim_steps) - print(f"target t_enc is {t_enc} steps") - - if opt_save_to_disk_path is not None: - session_out_path = os.path.join(opt_save_to_disk_path, req.session_id) - os.makedirs(session_out_path, exist_ok=True) - else: - session_out_path = None - - seeds = "" - with torch.no_grad(): - for n in trange(opt_n_iter, desc="Sampling"): - for prompts in tqdm(data, desc="data"): - - with precision_scope("cuda"): - modelCS.to(device) - uc = None - if opt_scale != 1.0: - uc = modelCS.get_learned_conditioning(batch_size * [req.negative_prompt]) - if isinstance(prompts, tuple): - prompts = list(prompts) - - subprompts, weights = split_weighted_subprompts(prompts[0]) - if len(subprompts) > 1: - c = torch.zeros_like(uc) - totalWeight = sum(weights) - # normalize each "sub prompt" and add it - for i in range(len(subprompts)): - weight = weights[i] - # if not skip_normalize: - weight = weight / totalWeight - c = torch.add(c, modelCS.get_learned_conditioning(subprompts[i]), alpha=weight) - else: - c = modelCS.get_learned_conditioning(prompts) - - modelFS.to(device) - - partial_x_samples = None - def img_callback(x_samples, i): - nonlocal partial_x_samples - - partial_x_samples = x_samples - - if req.stream_progress_updates: - n_steps = opt_ddim_steps if req.init_image is None else t_enc - progress = {"step": i, "total_steps": n_steps} - - if req.stream_image_progress and i % 5 == 0: - partial_images = [] - - for i in range(batch_size): - x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0)) - x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) - x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c") - x_sample = x_sample.astype(np.uint8) - img = Image.fromarray(x_sample) - buf = BytesIO() - img.save(buf, format='JPEG') - buf.seek(0) - - del img, x_sample, x_samples_ddim - # don't delete x_samples, it is used in the code that called this callback - - temp_images[str(req.session_id) + '/' + str(i)] = buf - partial_images.append({'path': f'/image/tmp/{req.session_id}/{i}'}) - - progress['output'] = partial_images - - yield json.dumps(progress) - - if stop_processing: - raise UserInitiatedStop("User requested that we stop processing") - - # run the handler - try: - if handler == _txt2img: - x_samples = _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, opt_sampler_name) - else: - x_samples = _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask) - - yield from x_samples - - x_samples = partial_x_samples - except UserInitiatedStop: - if partial_x_samples is None: - continue - - x_samples = partial_x_samples - - print("saving images") - for i in range(batch_size): - - x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0)) - x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) - x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c") - x_sample = x_sample.astype(np.uint8) - img = Image.fromarray(x_sample) - - has_filters = (opt_use_face_correction is not None and opt_use_face_correction.startswith('GFPGAN')) or \ - (opt_use_upscale is not None and opt_use_upscale.startswith('RealESRGAN')) - - return_orig_img = not has_filters or not opt_show_only_filtered - - if stop_processing: - return_orig_img = True - - if opt_save_to_disk_path is not None: - 
prompt_flattened = filename_regex.sub('_', prompts[0]) - prompt_flattened = prompt_flattened[:50] - - img_id = str(uuid.uuid4())[-8:] - - file_path = f"{prompt_flattened}_{img_id}" - img_out_path = os.path.join(session_out_path, f"{file_path}.{opt_format}") - meta_out_path = os.path.join(session_out_path, f"{file_path}.txt") - - if return_orig_img: - save_image(img, img_out_path) - - save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name, req.negative_prompt, ckpt_file) - - if return_orig_img: - img_data = img_to_base64_str(img, opt_format) - res_image_orig = ResponseImage(data=img_data, seed=opt_seed) - res.images.append(res_image_orig) - - if opt_save_to_disk_path is not None: - res_image_orig.path_abs = img_out_path - - del img - - if has_filters and not stop_processing: - print('Applying filters..') - - gc() - filters_applied = [] - - if opt_use_face_correction: - _, _, output = model_gfpgan.enhance(x_sample[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True) - x_sample = output[:,:,::-1] - filters_applied.append(opt_use_face_correction) - - if opt_use_upscale: - output, _ = model_real_esrgan.enhance(x_sample[:,:,::-1]) - x_sample = output[:,:,::-1] - filters_applied.append(opt_use_upscale) - - filtered_image = Image.fromarray(x_sample) - - filtered_img_data = img_to_base64_str(filtered_image, opt_format) - res_image_filtered = ResponseImage(data=filtered_img_data, seed=opt_seed) - res.images.append(res_image_filtered) - - filters_applied = "_".join(filters_applied) - - if opt_save_to_disk_path is not None: - filtered_img_out_path = os.path.join(session_out_path, f"{file_path}_{filters_applied}.{opt_format}") - save_image(filtered_image, filtered_img_out_path) - res_image_filtered.path_abs = filtered_img_out_path - - del filtered_image - - seeds += str(opt_seed) + "," - opt_seed += 1 - - move_fs_to_cpu() - gc() - del x_samples, x_samples_ddim, x_sample - print("memory_final = ", torch.cuda.memory_allocated() / 1e6) - - print('Task completed') - - yield json.dumps(res.json()) - -def save_image(img, img_out_path): - try: - img.save(img_out_path) - except: - print('could not save the file', traceback.format_exc()) - -def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name, negative_prompt, ckpt_file): - metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}\nNegative Prompt: {negative_prompt}\nStable Diffusion Model: {ckpt_file + '.ckpt'}" - - try: - with open(meta_out_path, 'w') as f: - f.write(metadata) - except: - print('could not save the file', traceback.format_exc()) - -def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, sampler_name): - shape = [opt_n_samples, opt_C, opt_H // opt_f, opt_W // opt_f] - - if device != "cpu": - mem = torch.cuda.memory_allocated() / 1e6 - modelCS.to("cpu") - while torch.cuda.memory_allocated() / 1e6 >= mem: - time.sleep(1) - - if sampler_name == 'ddim': - model.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False) - - samples_ddim = model.sample( - S=opt_ddim_steps, - conditioning=c, - seed=opt_seed, 
- shape=shape, - verbose=False, - unconditional_guidance_scale=opt_scale, - unconditional_conditioning=uc, - eta=opt_ddim_eta, - x_T=start_code, - img_callback=img_callback, - mask=mask, - sampler = sampler_name, - ) - - yield from samples_ddim - -def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask): - # encode (scaled latent) - z_enc = model.stochastic_encode( - init_latent, - torch.tensor([t_enc] * batch_size).to(device), - opt_seed, - opt_ddim_eta, - opt_ddim_steps, - ) - x_T = None if mask is None else init_latent - - # decode it - samples_ddim = model.sample( - t_enc, - c, - z_enc, - unconditional_guidance_scale=opt_scale, - unconditional_conditioning=uc, - img_callback=img_callback, - mask=mask, - x_T=x_T, - sampler = 'ddim' - ) - - yield from samples_ddim - -def move_fs_to_cpu(): - if device != "cpu": - mem = torch.cuda.memory_allocated() / 1e6 - modelFS.to("cpu") - while torch.cuda.memory_allocated() / 1e6 >= mem: - time.sleep(1) - -def gc(): - if device == 'cpu': - return - - torch.cuda.empty_cache() - torch.cuda.ipc_collect() - -# internal - -def chunk(it, size): - it = iter(it) - return iter(lambda: tuple(islice(it, size)), ()) - - -def load_model_from_config(ckpt, verbose=False): - print(f"Loading model from {ckpt}") - pl_sd = torch.load(ckpt, map_location="cpu") - if "global_step" in pl_sd: - print(f"Global Step: {pl_sd['global_step']}") - sd = pl_sd["state_dict"] - return sd - -# utils -class UserInitiatedStop(Exception): - pass - -def load_img(img_str, w0, h0): - image = base64_str_to_img(img_str).convert("RGB") - w, h = image.size - print(f"loaded input image of size ({w}, {h}) from base64") - if h0 is not None and w0 is not None: - h, w = h0, w0 - - w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64 - image = image.resize((w, h), resample=Image.Resampling.LANCZOS) - image = np.array(image).astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - return 2.*image - 1. 
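
The load_img helper above maps RGB pixels into the [-1, 1] range expected by the first-stage encoder, and the decode path later inverts this with torch.clamp((x + 1.0) / 2.0). Below is a minimal, self-contained sketch of that round trip, assuming only Pillow (>= 9.1 for Image.Resampling), NumPy and PyTorch; the function names are illustrative and are not part of the original runtime.py.

```python
import numpy as np
import torch
from PIL import Image

def to_model_range(img: Image.Image) -> torch.Tensor:
    # Snap width/height down to multiples of 64, then scale RGB from [0, 255] to [-1, 1].
    w, h = (d - d % 64 for d in img.size)
    img = img.convert("RGB").resize((w, h), resample=Image.Resampling.LANCZOS)
    x = np.asarray(img, dtype=np.float32) / 255.0           # H x W x C in [0, 1]
    x = torch.from_numpy(x).permute(2, 0, 1).unsqueeze(0)   # 1 x C x H x W
    return 2.0 * x - 1.0                                     # [-1, 1]

def to_pil(x: torch.Tensor) -> Image.Image:
    # Inverse of the scaling above: clamp back into [0, 1] and convert to uint8.
    x = torch.clamp((x + 1.0) / 2.0, min=0.0, max=1.0)
    arr = (255.0 * x[0].permute(1, 2, 0).cpu().numpy()).astype(np.uint8)
    return Image.fromarray(arr)
```

Sizes are snapped down to multiples of 64, which keeps the spatial dimensions divisible through the model's downsampling stages (the latent space alone is already 8x smaller, per opt_f = 8).
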
- -def load_mask(mask_str, h0, w0, newH, newW, invert=False): - image = base64_str_to_img(mask_str).convert("RGB") - w, h = image.size - print(f"loaded input mask of size ({w}, {h})") - - if invert: - print("inverted") - image = ImageOps.invert(image) - # where_0, where_1 = np.where(image == 0), np.where(image == 255) - # image[where_0], image[where_1] = 255, 0 - - if h0 is not None and w0 is not None: - h, w = h0, w0 - - w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64 - - print(f"New mask size ({w}, {h})") - image = image.resize((newW, newH), resample=Image.Resampling.LANCZOS) - image = np.array(image) - - image = image.astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - return image - -# https://stackoverflow.com/a/61114178 -def img_to_base64_str(img, output_format="PNG"): - buffered = BytesIO() - img.save(buffered, format=output_format) - buffered.seek(0) - img_byte = buffered.getvalue() - img_str = "data:image/png;base64," + base64.b64encode(img_byte).decode() - return img_str - -def base64_str_to_img(img_str): - img_str = img_str[len("data:image/png;base64,"):] - data = base64.b64decode(img_str) - buffered = BytesIO(data) - img = Image.open(buffered) - return img diff --git a/spaces/awacke1/04-Gradio-SOTA/README.md b/spaces/awacke1/04-Gradio-SOTA/README.md deleted file mode 100644 index 274049a25ae12310d89090ca92b0d5bf9aa82dd4..0000000000000000000000000000000000000000 --- a/spaces/awacke1/04-Gradio-SOTA/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 04 Gradio SOTA -emoji: 💻 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/4-GeneratorCalcPipe/app.py b/spaces/awacke1/4-GeneratorCalcPipe/app.py deleted file mode 100644 index 74df353b21c4d365706e2567741d6e570b80a555..0000000000000000000000000000000000000000 --- a/spaces/awacke1/4-GeneratorCalcPipe/app.py +++ /dev/null @@ -1,108 +0,0 @@ -import gradio as gr -import os - -# PersistDataset ----- -import os -import csv -import gradio as gr -from gradio import inputs, outputs -import huggingface_hub -from huggingface_hub import Repository, hf_hub_download, upload_file -from datetime import datetime - -# created new dataset as awacke1/MindfulStory.csv -#DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/MindfulStory.csv" -#DATASET_REPO_ID = "awacke1/MindfulStory.csv" -#DATA_FILENAME = "MindfulStory.csv" -#DATA_FILE = os.path.join("data", DATA_FILENAME) -HF_TOKEN = os.environ.get("HF_TOKEN") - -# Download dataset repo using hub download -#try: -# hf_hub_download( -# repo_id=DATASET_REPO_ID, -# filename=DATA_FILENAME, -# cache_dir=DATA_DIRNAME, -# force_filename=DATA_FILENAME -# ) -#except: -# print("file not found") - -#def AIMemory(title: str, story: str): -# if title and story: -# with open(DATA_FILE, "a") as csvfile: -# writer = csv.DictWriter(csvfile, fieldnames=["title", "story", "time"]) -# writer.writerow({"title": title, "story": story, "time": str(datetime.now())}) - # uncomment line below to begin saving your changes - #commit_url = repo.push_to_hub() -# return "" - - -# Set up cloned dataset from repo for operations -#repo = Repository( -# local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -#) - -#generator1 = gr.Interface.load("bigscience/bloom", api_key=HF_TOKEN) - - -generator1 = 
gr.Interface.load("huggingface/gpt2-large", api_key=HF_TOKEN) -generator2 = gr.Interface.load("huggingface/EleutherAI/gpt-neo-2.7B", api_key=HF_TOKEN) -generator3 = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B", api_key=HF_TOKEN) - - -def calculator(intro, operator, outro): - if operator == "add": - output = generator2(intro) + generator3(outro) - title = intro + " " + outro -# saved = AIMemory(title, output) - return output - elif operator == "subtract": - output = generator2(outro) + generator3(intro) - title = outro + " " + intro -# saved = AIMemory(title, output) - output = output.replace(intro, "").replace(outro, "") - return output - elif operator == "multiply": - output = generator1(intro) + generator2(outro) + generator3(intro) - title = intro + " " + outro + " " + intro -# saved = AIMemory(title, output) - return output - elif operator == "divide": - output = generator1(outro) + generator2(intro) + generator3(outro) - title = outro + " " + intro + " " + outro -# saved = AIMemory(title, output) - output = output.replace(intro, "").replace(outro, "") - return output - -#with open('Mindfulness.txt', 'r') as file: -# context = file.read() -#contextBox = gr.Textbox(lines=3, default=context, label="Story starter") - -examples = [ - ["Asynchronous Telemedicine", "multiply", "Provide remote care services live addressing provider shortages"], - ["Ambient and emotion AI", "multiply", "rtificial intelligence showing empathy and compassion, reducing biases making us feel cared for and assist lifestyle"], - ["import gradio as gr", "multiply", "import streamlit as st"], - ["Skin Patch", "multiply", "Allow technology to measure blood pressure, glucose, reducing huge bulky devices"], - ["Affordable vein scanner", "multiply", "View veins through skin"], - ["Synthetic medical records", "multiply", "Create synthetic medical records using GANS trained to create synthetic data"], - ["Blood draw devices used in clinical trials", "multiply", "So you dont have to go to physical location, engagement during trials"], - ["Smart TVs being used for remote care", "multiply", "Video chat and recordings for remote care consultations"], - ["Why does a chicken coop have two doors? Because if had four doors it would be a chicken sedan!", "multiply", "Why did the chicken cross the park? To get to the other slide."], - ["What type of shoes do ninjas wear? Sneakers", "add", "Can a ninja bring a ninja star into the airport? Shuriken."], - ["To save the planet with good looks and comedy find your", "multiply", "Everybody laughed at me when I told them I was going to be a comedian. 
I thought well, thats not bad for a start."] -] - -demo = gr.Interface( - calculator, - [ - "text", - gr.Radio(["add", "subtract", "multiply", "divide"]), - "text" - ], - "text", - examples=examples, - article="Saved story memory dataset: https://huggingface.co/datasets/awacke1/MindfulStory.csv with available models to use from text gen: https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads", - live=True, -) -demo.launch() \ No newline at end of file diff --git a/spaces/awacke1/HealthyBrainAging/README.md b/spaces/awacke1/HealthyBrainAging/README.md deleted file mode 100644 index e3e063b1915faf8c1b6599240691962e3a13bd5b..0000000000000000000000000000000000000000 --- a/spaces/awacke1/HealthyBrainAging/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: HealthyBrainAging -emoji: ⚡ -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/PhysicsRacingDemoWith3DARVR/index.html b/spaces/awacke1/PhysicsRacingDemoWith3DARVR/index.html deleted file mode 100644 index f3d8fd0ea9a4162d42189755deaaec8c72e1941e..0000000000000000000000000000000000000000 --- a/spaces/awacke1/PhysicsRacingDemoWith3DARVR/index.html +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - My static Space - - - -

      SimPhysics

      -

      User input: WASD

      -

      This WebGL demo demonstrates PlayCanvas and a web-based physics vehicle simulation that is playable anywhere your browser goes 🤗.

      -

      Source code is in the Readme.md file.

      -

      The PlayCanvas project is here

      -
      - -
      - - diff --git a/spaces/awacke1/Zero-Shot-Classification-valhalla-distilbart-mnli-12-1/README.md b/spaces/awacke1/Zero-Shot-Classification-valhalla-distilbart-mnli-12-1/README.md deleted file mode 100644 index c1454ad9475598d5aefd00b2c9663f51ea9db276..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Zero-Shot-Classification-valhalla-distilbart-mnli-12-1/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Zero Shot Classification Valhalla Distilbart Mnli 12 1 -emoji: 🐢 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/facebook-fastspeech2-en-ljspeech-0731/app.py b/spaces/awacke1/facebook-fastspeech2-en-ljspeech-0731/app.py deleted file mode 100644 index 624711103fff0eb591bc05f07ae20c47fbe03cd2..0000000000000000000000000000000000000000 --- a/spaces/awacke1/facebook-fastspeech2-en-ljspeech-0731/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/facebook/fastspeech2-en-ljspeech").launch() \ No newline at end of file diff --git a/spaces/badayvedat/AudioSep/models/CLAP/open_clip/timm_model.py b/spaces/badayvedat/AudioSep/models/CLAP/open_clip/timm_model.py deleted file mode 100644 index c9d1ab4666b5bab5038d44b90c9ddca5087de460..0000000000000000000000000000000000000000 --- a/spaces/badayvedat/AudioSep/models/CLAP/open_clip/timm_model.py +++ /dev/null @@ -1,112 +0,0 @@ -""" timm model adapter - -Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model. -""" -from collections import OrderedDict - -import torch.nn as nn - -try: - import timm - from timm.models.layers import Mlp, to_2tuple - from timm.models.layers.attention_pool2d import RotAttentionPool2d - from timm.models.layers.attention_pool2d import ( - AttentionPool2d as AbsAttentionPool2d, - ) -except ImportError as e: - timm = None - -from .utils import freeze_batch_norm_2d - - -class TimmModel(nn.Module): - """timm model adapter - # FIXME this adapter is a work in progress, may change in ways that break weight compat - """ - - def __init__( - self, - model_name, - embed_dim, - image_size=224, - pool="avg", - proj="linear", - drop=0.0, - pretrained=False, - ): - super().__init__() - if timm is None: - raise RuntimeError("Please `pip install timm` to use timm models.") - - self.image_size = to_2tuple(image_size) - self.trunk = timm.create_model(model_name, pretrained=pretrained) - feat_size = self.trunk.default_cfg.get("pool_size", None) - feature_ndim = 1 if not feat_size else 2 - if pool in ("abs_attn", "rot_attn"): - assert feature_ndim == 2 - # if attn pooling used, remove both classifier and default pool - self.trunk.reset_classifier(0, global_pool="") - else: - # reset global pool if pool config set, otherwise leave as network default - reset_kwargs = dict(global_pool=pool) if pool else {} - self.trunk.reset_classifier(0, **reset_kwargs) - prev_chs = self.trunk.num_features - - head_layers = OrderedDict() - if pool == "abs_attn": - head_layers["pool"] = AbsAttentionPool2d( - prev_chs, feat_size=feat_size, out_features=embed_dim - ) - prev_chs = embed_dim - elif pool == "rot_attn": - head_layers["pool"] = RotAttentionPool2d(prev_chs, out_features=embed_dim) - prev_chs = embed_dim - else: - assert proj, "projection layer needed if non-attention pooling is used." 
- - # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used - if proj == "linear": - head_layers["drop"] = nn.Dropout(drop) - head_layers["proj"] = nn.Linear(prev_chs, embed_dim) - elif proj == "mlp": - head_layers["mlp"] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop) - - self.head = nn.Sequential(head_layers) - - def lock(self, unlocked_groups=0, freeze_bn_stats=False): - """lock modules - Args: - unlocked_groups (int): leave last n layer groups unlocked (default: 0) - """ - if not unlocked_groups: - # lock full model - for param in self.trunk.parameters(): - param.requires_grad = False - if freeze_bn_stats: - freeze_batch_norm_2d(self.trunk) - else: - # NOTE: partial freeze requires latest timm (master) branch and is subject to change - try: - # FIXME import here until API stable and in an official release - from timm.models.helpers import group_parameters, group_modules - except ImportError: - raise RuntimeError( - "Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`" - ) - matcher = self.trunk.group_matcher() - gparams = group_parameters(self.trunk, matcher) - max_layer_id = max(gparams.keys()) - max_layer_id = max_layer_id - unlocked_groups - for group_idx in range(max_layer_id + 1): - group = gparams[group_idx] - for param in group: - self.trunk.get_parameter(param).requires_grad = False - if freeze_bn_stats: - gmodules = group_modules(self.trunk, matcher, reverse=True) - gmodules = {k for k, v in gmodules.items() if v <= max_layer_id} - freeze_batch_norm_2d(self.trunk, gmodules) - - def forward(self, x): - x = self.trunk(x) - x = self.head(x) - return x diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/inputs/ScreenNode.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/inputs/ScreenNode.js deleted file mode 100644 index 76da4d18b1f6ad5c6ad897d3c38e6ac7cef0aa06..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/inputs/ScreenNode.js +++ /dev/null @@ -1,30 +0,0 @@ -/** - * @author sunag / http://www.sunag.com.br/ - */ - -import { InputNode } from '../core/InputNode.js'; -import { TextureNode } from './TextureNode.js'; - -function ScreenNode( uv ) { - - TextureNode.call( this, undefined, uv ); - -} - -ScreenNode.prototype = Object.create( TextureNode.prototype ); -ScreenNode.prototype.constructor = ScreenNode; -ScreenNode.prototype.nodeType = "Screen"; - -ScreenNode.prototype.getUnique = function () { - - return true; - -}; - -ScreenNode.prototype.getTexture = function ( builder, output ) { - - return InputNode.prototype.generate.call( this, builder, output, this.getUuid(), 't', 'renderTexture' ); - -}; - -export { ScreenNode }; diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/FXAAShader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/FXAAShader.js deleted file mode 100644 index 57f593871dff91d738caac200ac383a44082302f..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/FXAAShader.js +++ /dev/null @@ -1,1115 +0,0 @@ -/** - * @author alteredq / http://alteredqualia.com/ - * @author davidedc / http://www.sketchpatch.net/ - * - * NVIDIA FXAA by Timothy Lottes - * http://timothylottes.blogspot.com/2011/06/fxaa3-source-released.html - * - WebGL port by @supereggbert - * http://www.glge.org/demos/fxaa/ - */ - -THREE.FXAAShader = { - - uniforms: { - - 
"tDiffuse": { value: null }, - "resolution": { value: new THREE.Vector2( 1 / 1024, 1 / 512 ) } - - }, - - vertexShader: [ - - "varying vec2 vUv;", - - "void main() {", - - "vUv = uv;", - "gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );", - - "}" - - ].join( "\n" ), - - fragmentShader: [ - "precision highp float;", - "", - "uniform sampler2D tDiffuse;", - "", - "uniform vec2 resolution;", - "", - "varying vec2 vUv;", - "", - "// FXAA 3.11 implementation by NVIDIA, ported to WebGL by Agost Biro (biro@archilogic.com)", - "", - "//----------------------------------------------------------------------------------", - "// File: es3-kepler\FXAA\assets\shaders/FXAA_DefaultES.frag", - "// SDK Version: v3.00", - "// Email: gameworks@nvidia.com", - "// Site: http://developer.nvidia.com/", - "//", - "// Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.", - "//", - "// Redistribution and use in source and binary forms, with or without", - "// modification, are permitted provided that the following conditions", - "// are met:", - "// * Redistributions of source code must retain the above copyright", - "// notice, this list of conditions and the following disclaimer.", - "// * Redistributions in binary form must reproduce the above copyright", - "// notice, this list of conditions and the following disclaimer in the", - "// documentation and/or other materials provided with the distribution.", - "// * Neither the name of NVIDIA CORPORATION nor the names of its", - "// contributors may be used to endorse or promote products derived", - "// from this software without specific prior written permission.", - "//", - "// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY", - "// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", - "// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", - "// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR", - "// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", - "// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,", - "// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR", - "// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY", - "// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", - "// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", - "// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", - "//", - "//----------------------------------------------------------------------------------", - "", - "#define FXAA_PC 1", - "#define FXAA_GLSL_100 1", - "#define FXAA_QUALITY_PRESET 12", - "", - "#define FXAA_GREEN_AS_LUMA 1", - "", - "/*--------------------------------------------------------------------------*/", - "#ifndef FXAA_PC_CONSOLE", - " //", - " // The console algorithm for PC is included", - " // for developers targeting really low spec machines.", - " // Likely better to just run FXAA_PC, and use a really low preset.", - " //", - " #define FXAA_PC_CONSOLE 0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#ifndef FXAA_GLSL_120", - " #define FXAA_GLSL_120 0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#ifndef FXAA_GLSL_130", - " #define FXAA_GLSL_130 0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#ifndef FXAA_HLSL_3", - " #define FXAA_HLSL_3 0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#ifndef FXAA_HLSL_4", - " #define FXAA_HLSL_4 0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#ifndef FXAA_HLSL_5", - " #define FXAA_HLSL_5 0", - "#endif", - "/*==========================================================================*/", - "#ifndef FXAA_GREEN_AS_LUMA", - " //", - " // For those using non-linear color,", - " // and either not able to get luma in alpha, or not wanting to,", - " // this enables FXAA to run using green as a proxy for luma.", - " // So with this enabled, no need to pack luma in alpha.", - " //", - " // This will turn off AA on anything which lacks some amount of green.", - " // Pure red and blue or combination of only R and B, will get no AA.", - " //", - " // Might want to lower the settings for both,", - " // fxaaConsoleEdgeThresholdMin", - " // fxaaQualityEdgeThresholdMin", - " // In order to insure AA does not get turned off on colors", - " // which contain a minor amount of green.", - " //", - " // 1 = On.", - " // 0 = Off.", - " //", - " #define FXAA_GREEN_AS_LUMA 0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#ifndef FXAA_EARLY_EXIT", - " //", - " // Controls algorithm's early exit path.", - " // On PS3 turning this ON adds 2 cycles to the shader.", - " // On 360 turning this OFF adds 10ths of a millisecond to the shader.", - " // Turning this off on console will result in a more blurry image.", - " // So this defaults to on.", - " //", - " // 1 = On.", - " // 0 = Off.", - " //", - " #define FXAA_EARLY_EXIT 1", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#ifndef FXAA_DISCARD", - " //", - " // Only valid for PC OpenGL currently.", - " // Probably will not work when FXAA_GREEN_AS_LUMA = 1.", - " //", - " // 1 = Use 
discard on pixels which don't need AA.", - " // For APIs which enable concurrent TEX+ROP from same surface.", - " // 0 = Return unchanged color on pixels which don't need AA.", - " //", - " #define FXAA_DISCARD 0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#ifndef FXAA_FAST_PIXEL_OFFSET", - " //", - " // Used for GLSL 120 only.", - " //", - " // 1 = GL API supports fast pixel offsets", - " // 0 = do not use fast pixel offsets", - " //", - " #ifdef GL_EXT_gpu_shader4", - " #define FXAA_FAST_PIXEL_OFFSET 1", - " #endif", - " #ifdef GL_NV_gpu_shader5", - " #define FXAA_FAST_PIXEL_OFFSET 1", - " #endif", - " #ifdef GL_ARB_gpu_shader5", - " #define FXAA_FAST_PIXEL_OFFSET 1", - " #endif", - " #ifndef FXAA_FAST_PIXEL_OFFSET", - " #define FXAA_FAST_PIXEL_OFFSET 0", - " #endif", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#ifndef FXAA_GATHER4_ALPHA", - " //", - " // 1 = API supports gather4 on alpha channel.", - " // 0 = API does not support gather4 on alpha channel.", - " //", - " #if (FXAA_HLSL_5 == 1)", - " #define FXAA_GATHER4_ALPHA 1", - " #endif", - " #ifdef GL_ARB_gpu_shader5", - " #define FXAA_GATHER4_ALPHA 1", - " #endif", - " #ifdef GL_NV_gpu_shader5", - " #define FXAA_GATHER4_ALPHA 1", - " #endif", - " #ifndef FXAA_GATHER4_ALPHA", - " #define FXAA_GATHER4_ALPHA 0", - " #endif", - "#endif", - "", - "", - "/*============================================================================", - " FXAA QUALITY - TUNING KNOBS", - "------------------------------------------------------------------------------", - "NOTE the other tuning knobs are now in the shader function inputs!", - "============================================================================*/", - "#ifndef FXAA_QUALITY_PRESET", - " //", - " // Choose the quality preset.", - " // This needs to be compiled into the shader as it effects code.", - " // Best option to include multiple presets is to", - " // in each shader define the preset, then include this file.", - " //", - " // OPTIONS", - " // -----------------------------------------------------------------------", - " // 10 to 15 - default medium dither (10=fastest, 15=highest quality)", - " // 20 to 29 - less dither, more expensive (20=fastest, 29=highest quality)", - " // 39 - no dither, very expensive", - " //", - " // NOTES", - " // -----------------------------------------------------------------------", - " // 12 = slightly faster then FXAA 3.9 and higher edge quality (default)", - " // 13 = about same speed as FXAA 3.9 and better than 12", - " // 23 = closest to FXAA 3.9 visually and performance wise", - " // _ = the lowest digit is directly related to performance", - " // _ = the highest digit is directly related to style", - " //", - " #define FXAA_QUALITY_PRESET 12", - "#endif", - "", - "", - "/*============================================================================", - "", - " FXAA QUALITY - PRESETS", - "", - "============================================================================*/", - "", - "/*============================================================================", - " FXAA QUALITY - MEDIUM DITHER PRESETS", - "============================================================================*/", - "#if (FXAA_QUALITY_PRESET == 10)", - " #define FXAA_QUALITY_PS 3", - " #define FXAA_QUALITY_P0 1.5", - " #define FXAA_QUALITY_P1 3.0", - " #define FXAA_QUALITY_P2 12.0", - "#endif", - 
"/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 11)", - " #define FXAA_QUALITY_PS 4", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 3.0", - " #define FXAA_QUALITY_P3 12.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 12)", - " #define FXAA_QUALITY_PS 5", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 4.0", - " #define FXAA_QUALITY_P4 12.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 13)", - " #define FXAA_QUALITY_PS 6", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define FXAA_QUALITY_P4 4.0", - " #define FXAA_QUALITY_P5 12.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 14)", - " #define FXAA_QUALITY_PS 7", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define FXAA_QUALITY_P4 2.0", - " #define FXAA_QUALITY_P5 4.0", - " #define FXAA_QUALITY_P6 12.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 15)", - " #define FXAA_QUALITY_PS 8", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define FXAA_QUALITY_P4 2.0", - " #define FXAA_QUALITY_P5 2.0", - " #define FXAA_QUALITY_P6 4.0", - " #define FXAA_QUALITY_P7 12.0", - "#endif", - "", - "/*============================================================================", - " FXAA QUALITY - LOW DITHER PRESETS", - "============================================================================*/", - "#if (FXAA_QUALITY_PRESET == 20)", - " #define FXAA_QUALITY_PS 3", - " #define FXAA_QUALITY_P0 1.5", - " #define FXAA_QUALITY_P1 2.0", - " #define FXAA_QUALITY_P2 8.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 21)", - " #define FXAA_QUALITY_PS 4", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 8.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 22)", - " #define FXAA_QUALITY_PS 5", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define FXAA_QUALITY_P4 8.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 23)", - " #define FXAA_QUALITY_PS 6", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define FXAA_QUALITY_P4 2.0", - " #define FXAA_QUALITY_P5 8.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 24)", - " #define FXAA_QUALITY_PS 7", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define 
FXAA_QUALITY_P4 2.0", - " #define FXAA_QUALITY_P5 3.0", - " #define FXAA_QUALITY_P6 8.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 25)", - " #define FXAA_QUALITY_PS 8", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define FXAA_QUALITY_P4 2.0", - " #define FXAA_QUALITY_P5 2.0", - " #define FXAA_QUALITY_P6 4.0", - " #define FXAA_QUALITY_P7 8.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 26)", - " #define FXAA_QUALITY_PS 9", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define FXAA_QUALITY_P4 2.0", - " #define FXAA_QUALITY_P5 2.0", - " #define FXAA_QUALITY_P6 2.0", - " #define FXAA_QUALITY_P7 4.0", - " #define FXAA_QUALITY_P8 8.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 27)", - " #define FXAA_QUALITY_PS 10", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define FXAA_QUALITY_P4 2.0", - " #define FXAA_QUALITY_P5 2.0", - " #define FXAA_QUALITY_P6 2.0", - " #define FXAA_QUALITY_P7 2.0", - " #define FXAA_QUALITY_P8 4.0", - " #define FXAA_QUALITY_P9 8.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 28)", - " #define FXAA_QUALITY_PS 11", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define FXAA_QUALITY_P4 2.0", - " #define FXAA_QUALITY_P5 2.0", - " #define FXAA_QUALITY_P6 2.0", - " #define FXAA_QUALITY_P7 2.0", - " #define FXAA_QUALITY_P8 2.0", - " #define FXAA_QUALITY_P9 4.0", - " #define FXAA_QUALITY_P10 8.0", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_QUALITY_PRESET == 29)", - " #define FXAA_QUALITY_PS 12", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.5", - " #define FXAA_QUALITY_P2 2.0", - " #define FXAA_QUALITY_P3 2.0", - " #define FXAA_QUALITY_P4 2.0", - " #define FXAA_QUALITY_P5 2.0", - " #define FXAA_QUALITY_P6 2.0", - " #define FXAA_QUALITY_P7 2.0", - " #define FXAA_QUALITY_P8 2.0", - " #define FXAA_QUALITY_P9 2.0", - " #define FXAA_QUALITY_P10 4.0", - " #define FXAA_QUALITY_P11 8.0", - "#endif", - "", - "/*============================================================================", - " FXAA QUALITY - EXTREME QUALITY", - "============================================================================*/", - "#if (FXAA_QUALITY_PRESET == 39)", - " #define FXAA_QUALITY_PS 12", - " #define FXAA_QUALITY_P0 1.0", - " #define FXAA_QUALITY_P1 1.0", - " #define FXAA_QUALITY_P2 1.0", - " #define FXAA_QUALITY_P3 1.0", - " #define FXAA_QUALITY_P4 1.0", - " #define FXAA_QUALITY_P5 1.5", - " #define FXAA_QUALITY_P6 2.0", - " #define FXAA_QUALITY_P7 2.0", - " #define FXAA_QUALITY_P8 2.0", - " #define FXAA_QUALITY_P9 2.0", - " #define FXAA_QUALITY_P10 4.0", - " #define FXAA_QUALITY_P11 8.0", - "#endif", - "", - "", - "", - "/*============================================================================", - "", - " API PORTING", - "", - "============================================================================*/", - 
"#if (FXAA_GLSL_100 == 1) || (FXAA_GLSL_120 == 1) || (FXAA_GLSL_130 == 1)", - " #define FxaaBool bool", - " #define FxaaDiscard discard", - " #define FxaaFloat float", - " #define FxaaFloat2 vec2", - " #define FxaaFloat3 vec3", - " #define FxaaFloat4 vec4", - " #define FxaaHalf float", - " #define FxaaHalf2 vec2", - " #define FxaaHalf3 vec3", - " #define FxaaHalf4 vec4", - " #define FxaaInt2 ivec2", - " #define FxaaSat(x) clamp(x, 0.0, 1.0)", - " #define FxaaTex sampler2D", - "#else", - " #define FxaaBool bool", - " #define FxaaDiscard clip(-1)", - " #define FxaaFloat float", - " #define FxaaFloat2 float2", - " #define FxaaFloat3 float3", - " #define FxaaFloat4 float4", - " #define FxaaHalf half", - " #define FxaaHalf2 half2", - " #define FxaaHalf3 half3", - " #define FxaaHalf4 half4", - " #define FxaaSat(x) saturate(x)", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_GLSL_100 == 1)", - " #define FxaaTexTop(t, p) texture2D(t, p, 0.0)", - " #define FxaaTexOff(t, p, o, r) texture2D(t, p + (o * r), 0.0)", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_GLSL_120 == 1)", - " // Requires,", - " // #version 120", - " // And at least,", - " // #extension GL_EXT_gpu_shader4 : enable", - " // (or set FXAA_FAST_PIXEL_OFFSET 1 to work like DX9)", - " #define FxaaTexTop(t, p) texture2DLod(t, p, 0.0)", - " #if (FXAA_FAST_PIXEL_OFFSET == 1)", - " #define FxaaTexOff(t, p, o, r) texture2DLodOffset(t, p, 0.0, o)", - " #else", - " #define FxaaTexOff(t, p, o, r) texture2DLod(t, p + (o * r), 0.0)", - " #endif", - " #if (FXAA_GATHER4_ALPHA == 1)", - " // use #extension GL_ARB_gpu_shader5 : enable", - " #define FxaaTexAlpha4(t, p) textureGather(t, p, 3)", - " #define FxaaTexOffAlpha4(t, p, o) textureGatherOffset(t, p, o, 3)", - " #define FxaaTexGreen4(t, p) textureGather(t, p, 1)", - " #define FxaaTexOffGreen4(t, p, o) textureGatherOffset(t, p, o, 1)", - " #endif", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_GLSL_130 == 1)", - " // Requires \"#version 130\" or better", - " #define FxaaTexTop(t, p) textureLod(t, p, 0.0)", - " #define FxaaTexOff(t, p, o, r) textureLodOffset(t, p, 0.0, o)", - " #if (FXAA_GATHER4_ALPHA == 1)", - " // use #extension GL_ARB_gpu_shader5 : enable", - " #define FxaaTexAlpha4(t, p) textureGather(t, p, 3)", - " #define FxaaTexOffAlpha4(t, p, o) textureGatherOffset(t, p, o, 3)", - " #define FxaaTexGreen4(t, p) textureGather(t, p, 1)", - " #define FxaaTexOffGreen4(t, p, o) textureGatherOffset(t, p, o, 1)", - " #endif", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_HLSL_3 == 1)", - " #define FxaaInt2 float2", - " #define FxaaTex sampler2D", - " #define FxaaTexTop(t, p) tex2Dlod(t, float4(p, 0.0, 0.0))", - " #define FxaaTexOff(t, p, o, r) tex2Dlod(t, float4(p + (o * r), 0, 0))", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_HLSL_4 == 1)", - " #define FxaaInt2 int2", - " struct FxaaTex { SamplerState smpl; Texture2D tex; };", - " #define FxaaTexTop(t, p) t.tex.SampleLevel(t.smpl, p, 0.0)", - " #define FxaaTexOff(t, p, o, r) t.tex.SampleLevel(t.smpl, p, 0.0, o)", - "#endif", - "/*--------------------------------------------------------------------------*/", - "#if (FXAA_HLSL_5 == 1)", - " #define FxaaInt2 int2", - " struct FxaaTex { SamplerState smpl; Texture2D tex; };", - " #define 
FxaaTexTop(t, p) t.tex.SampleLevel(t.smpl, p, 0.0)", - " #define FxaaTexOff(t, p, o, r) t.tex.SampleLevel(t.smpl, p, 0.0, o)", - " #define FxaaTexAlpha4(t, p) t.tex.GatherAlpha(t.smpl, p)", - " #define FxaaTexOffAlpha4(t, p, o) t.tex.GatherAlpha(t.smpl, p, o)", - " #define FxaaTexGreen4(t, p) t.tex.GatherGreen(t.smpl, p)", - " #define FxaaTexOffGreen4(t, p, o) t.tex.GatherGreen(t.smpl, p, o)", - "#endif", - "", - "", - "/*============================================================================", - " GREEN AS LUMA OPTION SUPPORT FUNCTION", - "============================================================================*/", - "#if (FXAA_GREEN_AS_LUMA == 0)", - " FxaaFloat FxaaLuma(FxaaFloat4 rgba) { return rgba.w; }", - "#else", - " FxaaFloat FxaaLuma(FxaaFloat4 rgba) { return rgba.y; }", - "#endif", - "", - "", - "", - "", - "/*============================================================================", - "", - " FXAA3 QUALITY - PC", - "", - "============================================================================*/", - "#if (FXAA_PC == 1)", - "/*--------------------------------------------------------------------------*/", - "FxaaFloat4 FxaaPixelShader(", - " //", - " // Use noperspective interpolation here (turn off perspective interpolation).", - " // {xy} = center of pixel", - " FxaaFloat2 pos,", - " //", - " // Used only for FXAA Console, and not used on the 360 version.", - " // Use noperspective interpolation here (turn off perspective interpolation).", - " // {xy_} = upper left of pixel", - " // {_zw} = lower right of pixel", - " FxaaFloat4 fxaaConsolePosPos,", - " //", - " // Input color texture.", - " // {rgb_} = color in linear or perceptual color space", - " // if (FXAA_GREEN_AS_LUMA == 0)", - " // {__a} = luma in perceptual color space (not linear)", - " FxaaTex tex,", - " //", - " // Only used on the optimized 360 version of FXAA Console.", - " // For everything but 360, just use the same input here as for \"tex\".", - " // For 360, same texture, just alias with a 2nd sampler.", - " // This sampler needs to have an exponent bias of -1.", - " FxaaTex fxaaConsole360TexExpBiasNegOne,", - " //", - " // Only used on the optimized 360 version of FXAA Console.", - " // For everything but 360, just use the same input here as for \"tex\".", - " // For 360, same texture, just alias with a 3nd sampler.", - " // This sampler needs to have an exponent bias of -2.", - " FxaaTex fxaaConsole360TexExpBiasNegTwo,", - " //", - " // Only used on FXAA Quality.", - " // This must be from a constant/uniform.", - " // {x_} = 1.0/screenWidthInPixels", - " // {_y} = 1.0/screenHeightInPixels", - " FxaaFloat2 fxaaQualityRcpFrame,", - " //", - " // Only used on FXAA Console.", - " // This must be from a constant/uniform.", - " // This effects sub-pixel AA quality and inversely sharpness.", - " // Where N ranges between,", - " // N = 0.50 (default)", - " // N = 0.33 (sharper)", - " // {x__} = -N/screenWidthInPixels", - " // {_y_} = -N/screenHeightInPixels", - " // {_z_} = N/screenWidthInPixels", - " // {__w} = N/screenHeightInPixels", - " FxaaFloat4 fxaaConsoleRcpFrameOpt,", - " //", - " // Only used on FXAA Console.", - " // Not used on 360, but used on PS3 and PC.", - " // This must be from a constant/uniform.", - " // {x__} = -2.0/screenWidthInPixels", - " // {_y_} = -2.0/screenHeightInPixels", - " // {_z_} = 2.0/screenWidthInPixels", - " // {__w} = 2.0/screenHeightInPixels", - " FxaaFloat4 fxaaConsoleRcpFrameOpt2,", - " //", - " // Only used on FXAA Console.", - " // Only used on 360 in place 
of fxaaConsoleRcpFrameOpt2.", - " // This must be from a constant/uniform.", - " // {x__} = 8.0/screenWidthInPixels", - " // {_y_} = 8.0/screenHeightInPixels", - " // {_z_} = -4.0/screenWidthInPixels", - " // {__w} = -4.0/screenHeightInPixels", - " FxaaFloat4 fxaaConsole360RcpFrameOpt2,", - " //", - " // Only used on FXAA Quality.", - " // This used to be the FXAA_QUALITY_SUBPIX define.", - " // It is here now to allow easier tuning.", - " // Choose the amount of sub-pixel aliasing removal.", - " // This can effect sharpness.", - " // 1.00 - upper limit (softer)", - " // 0.75 - default amount of filtering", - " // 0.50 - lower limit (sharper, less sub-pixel aliasing removal)", - " // 0.25 - almost off", - " // 0.00 - completely off", - " FxaaFloat fxaaQualitySubpix,", - " //", - " // Only used on FXAA Quality.", - " // This used to be the FXAA_QUALITY_EDGE_THRESHOLD define.", - " // It is here now to allow easier tuning.", - " // The minimum amount of local contrast required to apply algorithm.", - " // 0.333 - too little (faster)", - " // 0.250 - low quality", - " // 0.166 - default", - " // 0.125 - high quality", - " // 0.063 - overkill (slower)", - " FxaaFloat fxaaQualityEdgeThreshold,", - " //", - " // Only used on FXAA Quality.", - " // This used to be the FXAA_QUALITY_EDGE_THRESHOLD_MIN define.", - " // It is here now to allow easier tuning.", - " // Trims the algorithm from processing darks.", - " // 0.0833 - upper limit (default, the start of visible unfiltered edges)", - " // 0.0625 - high quality (faster)", - " // 0.0312 - visible limit (slower)", - " // Special notes when using FXAA_GREEN_AS_LUMA,", - " // Likely want to set this to zero.", - " // As colors that are mostly not-green", - " // will appear very dark in the green channel!", - " // Tune by looking at mostly non-green content,", - " // then start at zero and increase until aliasing is a problem.", - " FxaaFloat fxaaQualityEdgeThresholdMin,", - " //", - " // Only used on FXAA Console.", - " // This used to be the FXAA_CONSOLE_EDGE_SHARPNESS define.", - " // It is here now to allow easier tuning.", - " // This does not effect PS3, as this needs to be compiled in.", - " // Use FXAA_CONSOLE_PS3_EDGE_SHARPNESS for PS3.", - " // Due to the PS3 being ALU bound,", - " // there are only three safe values here: 2 and 4 and 8.", - " // These options use the shaders ability to a free *|/ by 2|4|8.", - " // For all other platforms can be a non-power of two.", - " // 8.0 is sharper (default!!!)", - " // 4.0 is softer", - " // 2.0 is really soft (good only for vector graphics inputs)", - " FxaaFloat fxaaConsoleEdgeSharpness,", - " //", - " // Only used on FXAA Console.", - " // This used to be the FXAA_CONSOLE_EDGE_THRESHOLD define.", - " // It is here now to allow easier tuning.", - " // This does not effect PS3, as this needs to be compiled in.", - " // Use FXAA_CONSOLE_PS3_EDGE_THRESHOLD for PS3.", - " // Due to the PS3 being ALU bound,", - " // there are only two safe values here: 1/4 and 1/8.", - " // These options use the shaders ability to a free *|/ by 2|4|8.", - " // The console setting has a different mapping than the quality setting.", - " // Other platforms can use other values.", - " // 0.125 leaves less aliasing, but is softer (default!!!)", - " // 0.25 leaves more aliasing, and is sharper", - " FxaaFloat fxaaConsoleEdgeThreshold,", - " //", - " // Only used on FXAA Console.", - " // This used to be the FXAA_CONSOLE_EDGE_THRESHOLD_MIN define.", - " // It is here now to allow easier tuning.", - " // Trims the algorithm 
from processing darks.", - " // The console setting has a different mapping than the quality setting.", - " // This only applies when FXAA_EARLY_EXIT is 1.", - " // This does not apply to PS3,", - " // PS3 was simplified to avoid more shader instructions.", - " // 0.06 - faster but more aliasing in darks", - " // 0.05 - default", - " // 0.04 - slower and less aliasing in darks", - " // Special notes when using FXAA_GREEN_AS_LUMA,", - " // Likely want to set this to zero.", - " // As colors that are mostly not-green", - " // will appear very dark in the green channel!", - " // Tune by looking at mostly non-green content,", - " // then start at zero and increase until aliasing is a problem.", - " FxaaFloat fxaaConsoleEdgeThresholdMin,", - " //", - " // Extra constants for 360 FXAA Console only.", - " // Use zeros or anything else for other platforms.", - " // These must be in physical constant registers and NOT immediates.", - " // Immediates will result in compiler un-optimizing.", - " // {xyzw} = float4(1.0, -1.0, 0.25, -0.25)", - " FxaaFloat4 fxaaConsole360ConstDir", - ") {", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat2 posM;", - " posM.x = pos.x;", - " posM.y = pos.y;", - " #if (FXAA_GATHER4_ALPHA == 1)", - " #if (FXAA_DISCARD == 0)", - " FxaaFloat4 rgbyM = FxaaTexTop(tex, posM);", - " #if (FXAA_GREEN_AS_LUMA == 0)", - " #define lumaM rgbyM.w", - " #else", - " #define lumaM rgbyM.y", - " #endif", - " #endif", - " #if (FXAA_GREEN_AS_LUMA == 0)", - " FxaaFloat4 luma4A = FxaaTexAlpha4(tex, posM);", - " FxaaFloat4 luma4B = FxaaTexOffAlpha4(tex, posM, FxaaInt2(-1, -1));", - " #else", - " FxaaFloat4 luma4A = FxaaTexGreen4(tex, posM);", - " FxaaFloat4 luma4B = FxaaTexOffGreen4(tex, posM, FxaaInt2(-1, -1));", - " #endif", - " #if (FXAA_DISCARD == 1)", - " #define lumaM luma4A.w", - " #endif", - " #define lumaE luma4A.z", - " #define lumaS luma4A.x", - " #define lumaSE luma4A.y", - " #define lumaNW luma4B.w", - " #define lumaN luma4B.z", - " #define lumaW luma4B.x", - " #else", - " FxaaFloat4 rgbyM = FxaaTexTop(tex, posM);", - " #if (FXAA_GREEN_AS_LUMA == 0)", - " #define lumaM rgbyM.w", - " #else", - " #define lumaM rgbyM.y", - " #endif", - " #if (FXAA_GLSL_100 == 1)", - " FxaaFloat lumaS = FxaaLuma(FxaaTexOff(tex, posM, FxaaFloat2( 0.0, 1.0), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaE = FxaaLuma(FxaaTexOff(tex, posM, FxaaFloat2( 1.0, 0.0), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaN = FxaaLuma(FxaaTexOff(tex, posM, FxaaFloat2( 0.0,-1.0), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaW = FxaaLuma(FxaaTexOff(tex, posM, FxaaFloat2(-1.0, 0.0), fxaaQualityRcpFrame.xy));", - " #else", - " FxaaFloat lumaS = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2( 0, 1), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaE = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2( 1, 0), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaN = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2( 0,-1), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaW = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2(-1, 0), fxaaQualityRcpFrame.xy));", - " #endif", - " #endif", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat maxSM = max(lumaS, lumaM);", - " FxaaFloat minSM = min(lumaS, lumaM);", - " FxaaFloat maxESM = max(lumaE, maxSM);", - " FxaaFloat minESM = min(lumaE, minSM);", - " FxaaFloat maxWN = max(lumaN, lumaW);", - " FxaaFloat minWN = min(lumaN, lumaW);", - " FxaaFloat rangeMax = max(maxWN, maxESM);", - " FxaaFloat rangeMin = min(minWN, minESM);", - " FxaaFloat 
rangeMaxScaled = rangeMax * fxaaQualityEdgeThreshold;", - " FxaaFloat range = rangeMax - rangeMin;", - " FxaaFloat rangeMaxClamped = max(fxaaQualityEdgeThresholdMin, rangeMaxScaled);", - " FxaaBool earlyExit = range < rangeMaxClamped;", - "/*--------------------------------------------------------------------------*/", - " if(earlyExit)", - " #if (FXAA_DISCARD == 1)", - " FxaaDiscard;", - " #else", - " return rgbyM;", - " #endif", - "/*--------------------------------------------------------------------------*/", - " #if (FXAA_GATHER4_ALPHA == 0)", - " #if (FXAA_GLSL_100 == 1)", - " FxaaFloat lumaNW = FxaaLuma(FxaaTexOff(tex, posM, FxaaFloat2(-1.0,-1.0), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaSE = FxaaLuma(FxaaTexOff(tex, posM, FxaaFloat2( 1.0, 1.0), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaNE = FxaaLuma(FxaaTexOff(tex, posM, FxaaFloat2( 1.0,-1.0), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaSW = FxaaLuma(FxaaTexOff(tex, posM, FxaaFloat2(-1.0, 1.0), fxaaQualityRcpFrame.xy));", - " #else", - " FxaaFloat lumaNW = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2(-1,-1), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaSE = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2( 1, 1), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaNE = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2( 1,-1), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaSW = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2(-1, 1), fxaaQualityRcpFrame.xy));", - " #endif", - " #else", - " FxaaFloat lumaNE = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2(1, -1), fxaaQualityRcpFrame.xy));", - " FxaaFloat lumaSW = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2(-1, 1), fxaaQualityRcpFrame.xy));", - " #endif", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat lumaNS = lumaN + lumaS;", - " FxaaFloat lumaWE = lumaW + lumaE;", - " FxaaFloat subpixRcpRange = 1.0/range;", - " FxaaFloat subpixNSWE = lumaNS + lumaWE;", - " FxaaFloat edgeHorz1 = (-2.0 * lumaM) + lumaNS;", - " FxaaFloat edgeVert1 = (-2.0 * lumaM) + lumaWE;", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat lumaNESE = lumaNE + lumaSE;", - " FxaaFloat lumaNWNE = lumaNW + lumaNE;", - " FxaaFloat edgeHorz2 = (-2.0 * lumaE) + lumaNESE;", - " FxaaFloat edgeVert2 = (-2.0 * lumaN) + lumaNWNE;", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat lumaNWSW = lumaNW + lumaSW;", - " FxaaFloat lumaSWSE = lumaSW + lumaSE;", - " FxaaFloat edgeHorz4 = (abs(edgeHorz1) * 2.0) + abs(edgeHorz2);", - " FxaaFloat edgeVert4 = (abs(edgeVert1) * 2.0) + abs(edgeVert2);", - " FxaaFloat edgeHorz3 = (-2.0 * lumaW) + lumaNWSW;", - " FxaaFloat edgeVert3 = (-2.0 * lumaS) + lumaSWSE;", - " FxaaFloat edgeHorz = abs(edgeHorz3) + edgeHorz4;", - " FxaaFloat edgeVert = abs(edgeVert3) + edgeVert4;", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat subpixNWSWNESE = lumaNWSW + lumaNESE;", - " FxaaFloat lengthSign = fxaaQualityRcpFrame.x;", - " FxaaBool horzSpan = edgeHorz >= edgeVert;", - " FxaaFloat subpixA = subpixNSWE * 2.0 + subpixNWSWNESE;", - "/*--------------------------------------------------------------------------*/", - " if(!horzSpan) lumaN = lumaW;", - " if(!horzSpan) lumaS = lumaE;", - " if(horzSpan) lengthSign = fxaaQualityRcpFrame.y;", - " FxaaFloat subpixB = (subpixA * (1.0/12.0)) - lumaM;", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat gradientN = lumaN - lumaM;", - " FxaaFloat gradientS = lumaS - 
lumaM;", - " FxaaFloat lumaNN = lumaN + lumaM;", - " FxaaFloat lumaSS = lumaS + lumaM;", - " FxaaBool pairN = abs(gradientN) >= abs(gradientS);", - " FxaaFloat gradient = max(abs(gradientN), abs(gradientS));", - " if(pairN) lengthSign = -lengthSign;", - " FxaaFloat subpixC = FxaaSat(abs(subpixB) * subpixRcpRange);", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat2 posB;", - " posB.x = posM.x;", - " posB.y = posM.y;", - " FxaaFloat2 offNP;", - " offNP.x = (!horzSpan) ? 0.0 : fxaaQualityRcpFrame.x;", - " offNP.y = ( horzSpan) ? 0.0 : fxaaQualityRcpFrame.y;", - " if(!horzSpan) posB.x += lengthSign * 0.5;", - " if( horzSpan) posB.y += lengthSign * 0.5;", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat2 posN;", - " posN.x = posB.x - offNP.x * FXAA_QUALITY_P0;", - " posN.y = posB.y - offNP.y * FXAA_QUALITY_P0;", - " FxaaFloat2 posP;", - " posP.x = posB.x + offNP.x * FXAA_QUALITY_P0;", - " posP.y = posB.y + offNP.y * FXAA_QUALITY_P0;", - " FxaaFloat subpixD = ((-2.0)*subpixC) + 3.0;", - " FxaaFloat lumaEndN = FxaaLuma(FxaaTexTop(tex, posN));", - " FxaaFloat subpixE = subpixC * subpixC;", - " FxaaFloat lumaEndP = FxaaLuma(FxaaTexTop(tex, posP));", - "/*--------------------------------------------------------------------------*/", - " if(!pairN) lumaNN = lumaSS;", - " FxaaFloat gradientScaled = gradient * 1.0/4.0;", - " FxaaFloat lumaMM = lumaM - lumaNN * 0.5;", - " FxaaFloat subpixF = subpixD * subpixE;", - " FxaaBool lumaMLTZero = lumaMM < 0.0;", - "/*--------------------------------------------------------------------------*/", - " lumaEndN -= lumaNN * 0.5;", - " lumaEndP -= lumaNN * 0.5;", - " FxaaBool doneN = abs(lumaEndN) >= gradientScaled;", - " FxaaBool doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P1;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P1;", - " FxaaBool doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P1;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P1;", - "/*--------------------------------------------------------------------------*/", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P2;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P2;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P2;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P2;", - "/*--------------------------------------------------------------------------*/", - " #if (FXAA_QUALITY_PS > 3)", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P3;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P3;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P3;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P3;", - 
"/*--------------------------------------------------------------------------*/", - " #if (FXAA_QUALITY_PS > 4)", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P4;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P4;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P4;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P4;", - "/*--------------------------------------------------------------------------*/", - " #if (FXAA_QUALITY_PS > 5)", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P5;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P5;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P5;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P5;", - "/*--------------------------------------------------------------------------*/", - " #if (FXAA_QUALITY_PS > 6)", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P6;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P6;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P6;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P6;", - "/*--------------------------------------------------------------------------*/", - " #if (FXAA_QUALITY_PS > 7)", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P7;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P7;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P7;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P7;", - "/*--------------------------------------------------------------------------*/", - " #if (FXAA_QUALITY_PS > 8)", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P8;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P8;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P8;", - " if(!doneP) posP.y += 
offNP.y * FXAA_QUALITY_P8;", - "/*--------------------------------------------------------------------------*/", - " #if (FXAA_QUALITY_PS > 9)", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P9;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P9;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P9;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P9;", - "/*--------------------------------------------------------------------------*/", - " #if (FXAA_QUALITY_PS > 10)", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P10;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P10;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P10;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P10;", - "/*--------------------------------------------------------------------------*/", - " #if (FXAA_QUALITY_PS > 11)", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P11;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P11;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P11;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P11;", - "/*--------------------------------------------------------------------------*/", - " #if (FXAA_QUALITY_PS > 12)", - " if(doneNP) {", - " if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));", - " if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));", - " if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;", - " if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;", - " doneN = abs(lumaEndN) >= gradientScaled;", - " doneP = abs(lumaEndP) >= gradientScaled;", - " if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P12;", - " if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P12;", - " doneNP = (!doneN) || (!doneP);", - " if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P12;", - " if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P12;", - "/*--------------------------------------------------------------------------*/", - " }", - " #endif", - "/*--------------------------------------------------------------------------*/", - " }", - " #endif", - "/*--------------------------------------------------------------------------*/", - " }", - " #endif", - "/*--------------------------------------------------------------------------*/", - " }", - " #endif", - "/*--------------------------------------------------------------------------*/", - " }", - " #endif", - "/*--------------------------------------------------------------------------*/", - " }", - " #endif", - 
"/*--------------------------------------------------------------------------*/", - " }", - " #endif", - "/*--------------------------------------------------------------------------*/", - " }", - " #endif", - "/*--------------------------------------------------------------------------*/", - " }", - " #endif", - "/*--------------------------------------------------------------------------*/", - " }", - " #endif", - "/*--------------------------------------------------------------------------*/", - " }", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat dstN = posM.x - posN.x;", - " FxaaFloat dstP = posP.x - posM.x;", - " if(!horzSpan) dstN = posM.y - posN.y;", - " if(!horzSpan) dstP = posP.y - posM.y;", - "/*--------------------------------------------------------------------------*/", - " FxaaBool goodSpanN = (lumaEndN < 0.0) != lumaMLTZero;", - " FxaaFloat spanLength = (dstP + dstN);", - " FxaaBool goodSpanP = (lumaEndP < 0.0) != lumaMLTZero;", - " FxaaFloat spanLengthRcp = 1.0/spanLength;", - "/*--------------------------------------------------------------------------*/", - " FxaaBool directionN = dstN < dstP;", - " FxaaFloat dst = min(dstN, dstP);", - " FxaaBool goodSpan = directionN ? goodSpanN : goodSpanP;", - " FxaaFloat subpixG = subpixF * subpixF;", - " FxaaFloat pixelOffset = (dst * (-spanLengthRcp)) + 0.5;", - " FxaaFloat subpixH = subpixG * fxaaQualitySubpix;", - "/*--------------------------------------------------------------------------*/", - " FxaaFloat pixelOffsetGood = goodSpan ? pixelOffset : 0.0;", - " FxaaFloat pixelOffsetSubpix = max(pixelOffsetGood, subpixH);", - " if(!horzSpan) posM.x += pixelOffsetSubpix * lengthSign;", - " if( horzSpan) posM.y += pixelOffsetSubpix * lengthSign;", - " #if (FXAA_DISCARD == 1)", - " return FxaaTexTop(tex, posM);", - " #else", - " return FxaaFloat4(FxaaTexTop(tex, posM).xyz, lumaM);", - " #endif", - "}", - "/*==========================================================================*/", - "#endif", - "", - "void main() {", - " gl_FragColor = FxaaPixelShader(", - " vUv,", - " vec4(0.0),", - " tDiffuse,", - " tDiffuse,", - " tDiffuse,", - " resolution,", - " vec4(0.0),", - " vec4(0.0),", - " vec4(0.0),", - " 0.75,", - " 0.166,", - " 0.0833,", - " 0.0,", - " 0.0,", - " 0.0,", - " vec4(0.0)", - " );", - "", - " // TODO avoid querying texture twice for same texel", - " gl_FragColor.a = texture2D(tDiffuse, vUv).a;", - "}" - ].join("\n") - -}; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/math/interpolants/LinearInterpolant.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/math/interpolants/LinearInterpolant.d.ts deleted file mode 100644 index 45a3ca6449ee63dfd7a20d5f5cab46b161654e32..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/math/interpolants/LinearInterpolant.d.ts +++ /dev/null @@ -1,12 +0,0 @@ -import { Interpolant } from '../Interpolant'; - -export class LinearInterpolant extends Interpolant { - constructor( - parameterPositions: any, - samplesValues: any, - sampleSize: number, - resultBuffer?: any - ); - - interpolate_(i1: number, t0: number, t: number, t1: number): any; -} diff --git a/spaces/bergum/commerce-demo/Dockerfile b/spaces/bergum/commerce-demo/Dockerfile deleted file mode 100644 index cb980891eb991b67524e6b25cce33088227068a3..0000000000000000000000000000000000000000 --- a/spaces/bergum/commerce-demo/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM vespaengine/vespa:latest - -USER root - -RUN 
yum install -y python39 python39-pip wget git -RUN pip3 install requests mmh3 spacy -RUN python3 -m spacy download en_core_web_sm - -COPY ./run.sh /opt/vespa/vespa/bin/run.sh -COPY ./proxy.py /opt/vespa/vespa/bin/proxy.py - -RUN chmod +x /opt/vespa/vespa/bin/proxy.py -RUN chmod +x /opt/vespa/vespa/bin/run.sh -RUN chown vespa /opt/vespa/vespa/bin/run.sh -RUN chown vespa /opt/vespa/vespa/bin/proxy.py - -RUN mkdir /opt/vespa/.m2/ -RUN mkdir /opt/maven -RUN chown vespa /opt/vespa/.m2/ -RUN chown vespa /opt/maven -USER vespa -EXPOSE 8000 -ENTRYPOINT ["/opt/vespa/vespa/bin/run.sh"] \ No newline at end of file diff --git a/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/noise.py b/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/noise.py deleted file mode 100644 index 768f0e9f73ea50b3262c643b712730f614488895..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/noise.py +++ /dev/null @@ -1,64 +0,0 @@ -import torch -import numpy as np -from PIL import ImageOps -import math -from .animation import sample_to_cv2 -import cv2 - -deforum_noise_gen = torch.Generator(device='cpu') - -# 2D Perlin noise in PyTorch https://gist.github.com/vadimkantorov/ac1b097753f217c5c11bc2ff396e0a57 -def rand_perlin_2d(shape, res, fade = lambda t: 6*t**5 - 15*t**4 + 10*t**3): - delta = (res[0] / shape[0], res[1] / shape[1]) - d = (shape[0] // res[0], shape[1] // res[1]) - - grid = torch.stack(torch.meshgrid(torch.arange(0, res[0], delta[0]), torch.arange(0, res[1], delta[1]), indexing='ij'), dim = -1) % 1 - angles = 2*math.pi*torch.rand(res[0]+1, res[1]+1, generator=deforum_noise_gen) - gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim = -1) - - tile_grads = lambda slice1, slice2: gradients[slice1[0]:slice1[1], slice2[0]:slice2[1]].repeat_interleave(d[0], 0).repeat_interleave(d[1], 1) - dot = lambda grad, shift: (torch.stack((grid[:shape[0],:shape[1],0] + shift[0], grid[:shape[0],:shape[1], 1] + shift[1] ), dim = -1) * grad[:shape[0], :shape[1]]).sum(dim = -1) - - n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0]) - n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0]) - n01 = dot(tile_grads([0, -1],[1, None]), [0, -1]) - n11 = dot(tile_grads([1, None], [1, None]), [-1,-1]) - t = fade(grid[:shape[0], :shape[1]]) - return math.sqrt(2) * torch.lerp(torch.lerp(n00, n10, t[..., 0]), torch.lerp(n01, n11, t[..., 0]), t[..., 1]) - -def rand_perlin_2d_octaves(shape, res, octaves=1, persistence=0.5): - noise = torch.zeros(shape) - frequency = 1 - amplitude = 1 - for _ in range(int(octaves)): - noise += amplitude * rand_perlin_2d(shape, (frequency*res[0], frequency*res[1])) - frequency *= 2 - amplitude *= persistence - return noise - -def condition_noise_mask(noise_mask, invert_mask = False): - if invert_mask: - noise_mask = ImageOps.invert(noise_mask) - noise_mask = np.array(noise_mask.convert("L")) - noise_mask = noise_mask.astype(np.float32) / 255.0 - noise_mask = np.around(noise_mask, decimals=0) - noise_mask = torch.from_numpy(noise_mask) - #noise_mask = torch.round(noise_mask) - return noise_mask - -def add_noise(sample, noise_amt: float, seed: int, noise_type: str, noise_args, noise_mask = None, invert_mask = False): - deforum_noise_gen.manual_seed(seed) # Reproducibility - sample2dshape = (sample.shape[0], sample.shape[1]) #sample is cv2, so height - width - noise = torch.randn((sample.shape[2], sample.shape[0], sample.shape[1]), generator=deforum_noise_gen) # White noise 
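# A minimal, self-contained sketch (not from the original noise.py) of the
# octave/persistence idea used by rand_perlin_2d_octaves above: each octave
# doubles the spatial frequency and scales its amplitude by `persistence`,
# so coarse structure dominates and finer octaves only add detail. Bilinear
# upsampling of random grids stands in for true Perlin gradient noise here,
# purely to keep the example short.
import torch
import torch.nn.functional as F

def fractal_noise_2d(shape, base_res=4, octaves=3, persistence=0.5, generator=None):
    h, w = shape
    noise = torch.zeros(h, w)
    amplitude, res = 1.0, base_res
    for _ in range(int(octaves)):
        coarse = torch.rand(1, 1, res, res, generator=generator)   # coarse random grid
        layer = F.interpolate(coarse, size=(h, w), mode="bilinear",
                              align_corners=False)[0, 0]           # smooth, full-resolution layer
        noise += amplitude * layer
        res *= 2                   # double the frequency for the next octave
        amplitude *= persistence   # shrink the contribution of finer octaves
    return noise

# Example: a 64x64 field with 3 octaves, shifted into [0, 1] much like the
# perlin branch of add_noise() below rescales the rand_perlin_2d_octaves output.
field = fractal_noise_2d((64, 64))
field = (field - field.min()) / (field.max() - field.min() + 1e-8)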
- if noise_type == 'perlin': - # rand_perlin_2d_octaves is between -1 and 1, so we need to shift it to be between 0 and 1 - # print(sample.shape) - noise = noise * ((rand_perlin_2d_octaves(sample2dshape, (int(noise_args[0]), int(noise_args[1])), octaves=noise_args[2], persistence=noise_args[3]) + torch.ones(sample2dshape)) / 2) - if noise_mask is not None: - noise_mask = condition_noise_mask(noise_mask, invert_mask) - noise_to_add = sample_to_cv2(noise * noise_mask) - else: - noise_to_add = sample_to_cv2(noise) - sample = cv2.addWeighted(sample, 1-noise_amt, noise_to_add, noise_amt, 0) - - return sample diff --git a/spaces/binarycache/voice_to_image/README.md b/spaces/binarycache/voice_to_image/README.md deleted file mode 100644 index 34d89445c1608fa0d344070a74689bf76f65e626..0000000000000000000000000000000000000000 --- a/spaces/binarycache/voice_to_image/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Voice to Image App -emoji: 📹 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_verion: 3.11.0 -app_file: app.py -pinned: false ---- - -# hugging_face_cxl diff --git a/spaces/bioriAsaeru/text-to-voice/Biokimia Harper Edisi 27 Ebook ((EXCLUSIVE)) Download.md b/spaces/bioriAsaeru/text-to-voice/Biokimia Harper Edisi 27 Ebook ((EXCLUSIVE)) Download.md deleted file mode 100644 index 6d07e8df6a07fb4365f8590e94796f9f1210628d..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Biokimia Harper Edisi 27 Ebook ((EXCLUSIVE)) Download.md +++ /dev/null @@ -1,9 +0,0 @@ -

      biokimia harper edisi 27 ebook download

      DOWNLOAD ::: https://urloso.com/2uySbZ

      Buku biokimia harper edisi 27, buku biokimia farmasi pdf, download buku biokimia. 27:1749-1755. Download free biokimia harper edisi 25 and edisi 27 ebooks in pdf, mobi (lr-biokimia harper edisi 27 | biokimia). 8a78ff9644

      diff --git a/spaces/bioriAsaeru/text-to-voice/Casmate Pro 652 Windows 7.md b/spaces/bioriAsaeru/text-to-voice/Casmate Pro 652 Windows 7.md deleted file mode 100644 index dd4203efeb9be7f6f8176cfb49b814efda3f149f..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Casmate Pro 652 Windows 7.md +++ /dev/null @@ -1,12 +0,0 @@ -

      Casmate Pro 652 Windows 7

      Download File: https://urloso.com/2uyPe7

      Casmate Pro 652 Windows 7 64 Bit, Windows 10 64 Bit, Windows 8 64 Bit, Windows 8 32 Bit, Windows XP 32 Bit, Windows Vista 32 Bit, Windows Vista 64 Bit. I am using a laptop to run the Linux distro. Both laptop and linux are running 32 bit windows 10. Linux runs on a laptop with 64 bit windows 10. Laptop OS 64 Bit is running on Windows 7 64 Bit. 8a78ff9644

      diff --git a/spaces/bioriAsaeru/text-to-voice/Descargar Crack Para Aspel Caja 3.5 ((INSTALL)).md b/spaces/bioriAsaeru/text-to-voice/Descargar Crack Para Aspel Caja 3.5 ((INSTALL)).md deleted file mode 100644 index 7191db6cfc1186c36f15d5cbf2415c4596671999..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Descargar Crack Para Aspel Caja 3.5 ((INSTALL)).md +++ /dev/null @@ -1,48 +0,0 @@ -

      descargar crack para aspel caja 3.5

      DOWNLOAD: https://urloso.com/2uyPMx

      .9 - those are packages - for now 3.5.9, 5.0 and 5.5 will come out - whichever comes next is still being versioned - the 3.5 ones are for old systems - and according to their own kitchen jargon... the new ones should come out in October... - some theory, a lot of testing on one side, a lot of testing on the other - what we can fix, what we can do... - the trust problems are very different - there are some things you can do, where the settings belong to the system - but in others there is no way to control it - that is true... but in some of those cases I think we can do something... - and once all that is sorted out, we will sit down and mess around with dconf-editor - like using dconf-editor - and you have to be the usual user - and you do not authenticate properly - no, using sudo - you do not authenticate properly? - and while we are at it, what is the problem? - do not use the usual user - ¬¬ no, that is not the point... the point is that as the usual user you do not authenticate, but you can make changes so that a page works...

      diff --git a/spaces/bioriAsaeru/text-to-voice/Dogville Spanish [DvDRip] Avi ((LINK)).md b/spaces/bioriAsaeru/text-to-voice/Dogville Spanish [DvDRip] Avi ((LINK)).md deleted file mode 100644 index 05e794691063932b1a70f4835b53e549e94426ba..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Dogville Spanish [DvDRip] Avi ((LINK)).md +++ /dev/null @@ -1,6 +0,0 @@ -

      Dogville Spanish [DvDRip] Avi

      DOWNLOAD: https://urloso.com/2uyOY9

      aaccfb2cb3

      diff --git a/spaces/bioriAsaeru/text-to-voice/Ingenieria De Transito Y Carreteras Nicholas Garber Descargar Gratis.md b/spaces/bioriAsaeru/text-to-voice/Ingenieria De Transito Y Carreteras Nicholas Garber Descargar Gratis.md deleted file mode 100644 index 468950b04db78f25a4780d7bae14e86a714eda7f..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Ingenieria De Transito Y Carreteras Nicholas Garber Descargar Gratis.md +++ /dev/null @@ -1,6 +0,0 @@ -

      ingenieria de transito y carreteras nicholas garber descargar gratis

      Download Zip: https://urloso.com/2uyOVu

      MANUAL DE CAPACIDAD DE CARRETERAS, 1998 - Nicholas J. Garber, Lester A. Hoel. INGENIERIA DE TRANSITO Y CARRETERAS. Edic. Thompson ... 1fdad05405

      diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/tests/models/test_audiogen.py b/spaces/brainblow/AudioCreator_Music-Audio_Generation/tests/models/test_audiogen.py deleted file mode 100644 index 3850af066cedd5ea38bd9aead9634d6aaf938218..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/tests/models/test_audiogen.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.models import AudioGen - - -class TestAudioGenModel: - def get_audiogen(self): - ag = AudioGen.get_pretrained(name='debug', device='cpu') - ag.set_generation_params(duration=2.0, extend_stride=2.) - return ag - - def test_base(self): - ag = self.get_audiogen() - assert ag.frame_rate == 25 - assert ag.sample_rate == 16000 - assert ag.audio_channels == 1 - - def test_generate_continuation(self): - ag = self.get_audiogen() - prompt = torch.randn(3, 1, 16000) - wav = ag.generate_continuation(prompt, 16000) - assert list(wav.shape) == [3, 1, 32000] - - prompt = torch.randn(2, 1, 16000) - wav = ag.generate_continuation( - prompt, 16000, ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000] - - prompt = torch.randn(2, 1, 16000) - with pytest.raises(AssertionError): - wav = ag.generate_continuation( - prompt, 16000, ['youpi', 'lapin dort', 'one too many']) - - def test_generate(self): - ag = self.get_audiogen() - wav = ag.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000] - - def test_generate_long(self): - ag = self.get_audiogen() - ag.max_duration = 3. - ag.set_generation_params(duration=4., extend_stride=2.) - wav = ag.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 16000 * 4] diff --git a/spaces/breadlicker45/gpt-youtuben-gen/README.md b/spaces/breadlicker45/gpt-youtuben-gen/README.md deleted file mode 100644 index fef287e122ebe349a43013f7d92bc8f0c16b3047..0000000000000000000000000000000000000000 --- a/spaces/breadlicker45/gpt-youtuben-gen/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: gpt-youtube text gen -emoji: 💻 -colorFrom: blue -colorTo: white -sdk: streamlit -sdk_version: 1.9.0 -app_file: app.py -pinned: true -duplicated_from: breadlicker45/gpt-ya-gen ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference \ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/demo/predictor.py b/spaces/brjathu/HMR2.0/vendor/detectron2/demo/predictor.py deleted file mode 100644 index 7b7ebd3f846850172c1f560f8492d51e5667f76d..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/demo/predictor.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import atexit -import bisect -import multiprocessing as mp -from collections import deque -import cv2 -import torch - -from detectron2.data import MetadataCatalog -from detectron2.engine.defaults import DefaultPredictor -from detectron2.utils.video_visualizer import VideoVisualizer -from detectron2.utils.visualizer import ColorMode, Visualizer - - -class VisualizationDemo(object): - def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False): - """ - Args: - cfg (CfgNode): - instance_mode (ColorMode): - parallel (bool): whether to run the model in different processes from visualization. 
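# A hedged usage sketch, not part of the test file above: the AudioGen tests in
# tests/models/test_audiogen.py exercise get_pretrained / set_generation_params /
# generate / generate_continuation; outside the test suite the same API is
# typically driven as below. The checkpoint name 'facebook/audiogen-medium' and
# the output stem names are assumptions for this example, not values taken from
# this repository.
from audiocraft.models import AudioGen
from audiocraft.data.audio import audio_write

model = AudioGen.get_pretrained('facebook/audiogen-medium')
model.set_generation_params(duration=4.0)                     # seconds of audio per prompt

wav = model.generate(['dog barking', 'rain on a tin roof'])   # tensor [batch, channels, samples]
for idx, one_wav in enumerate(wav):
    # Writes sample_0.wav, sample_1.wav with loudness normalization.
    audio_write(f'sample_{idx}', one_wav.cpu(), model.sample_rate, strategy='loudness')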
- Useful since the visualization logic can be slow. - """ - self.metadata = MetadataCatalog.get( - cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused" - ) - self.cpu_device = torch.device("cpu") - self.instance_mode = instance_mode - - self.parallel = parallel - if parallel: - num_gpu = torch.cuda.device_count() - self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu) - else: - self.predictor = DefaultPredictor(cfg) - - def run_on_image(self, image): - """ - Args: - image (np.ndarray): an image of shape (H, W, C) (in BGR order). - This is the format used by OpenCV. - - Returns: - predictions (dict): the output of the model. - vis_output (VisImage): the visualized image output. - """ - vis_output = None - predictions = self.predictor(image) - # Convert image from OpenCV BGR format to Matplotlib RGB format. - image = image[:, :, ::-1] - visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode) - if "panoptic_seg" in predictions: - panoptic_seg, segments_info = predictions["panoptic_seg"] - vis_output = visualizer.draw_panoptic_seg_predictions( - panoptic_seg.to(self.cpu_device), segments_info - ) - else: - if "sem_seg" in predictions: - vis_output = visualizer.draw_sem_seg( - predictions["sem_seg"].argmax(dim=0).to(self.cpu_device) - ) - if "instances" in predictions: - instances = predictions["instances"].to(self.cpu_device) - vis_output = visualizer.draw_instance_predictions(predictions=instances) - - return predictions, vis_output - - def _frame_from_video(self, video): - while video.isOpened(): - success, frame = video.read() - if success: - yield frame - else: - break - - def run_on_video(self, video): - """ - Visualizes predictions on frames of the input video. - - Args: - video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be - either a webcam or a video file. - - Yields: - ndarray: BGR visualizations of each video frame. - """ - video_visualizer = VideoVisualizer(self.metadata, self.instance_mode) - - def process_predictions(frame, predictions): - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - if "panoptic_seg" in predictions: - panoptic_seg, segments_info = predictions["panoptic_seg"] - vis_frame = video_visualizer.draw_panoptic_seg_predictions( - frame, panoptic_seg.to(self.cpu_device), segments_info - ) - elif "instances" in predictions: - predictions = predictions["instances"].to(self.cpu_device) - vis_frame = video_visualizer.draw_instance_predictions(frame, predictions) - elif "sem_seg" in predictions: - vis_frame = video_visualizer.draw_sem_seg( - frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device) - ) - - # Converts Matplotlib RGB format to OpenCV BGR format - vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR) - return vis_frame - - frame_gen = self._frame_from_video(video) - if self.parallel: - buffer_size = self.predictor.default_buffer_size - - frame_data = deque() - - for cnt, frame in enumerate(frame_gen): - frame_data.append(frame) - self.predictor.put(frame) - - if cnt >= buffer_size: - frame = frame_data.popleft() - predictions = self.predictor.get() - yield process_predictions(frame, predictions) - - while len(frame_data): - frame = frame_data.popleft() - predictions = self.predictor.get() - yield process_predictions(frame, predictions) - else: - for frame in frame_gen: - yield process_predictions(frame, self.predictor(frame)) - - -class AsyncPredictor: - """ - A predictor that runs the model asynchronously, possibly on >1 GPUs. 
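# A hedged usage sketch, not part of predictor.py: how the VisualizationDemo
# defined above is typically driven for a single image. The model-zoo config
# name, score threshold and file names are assumptions for this example only.
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.freeze()

demo = VisualizationDemo(cfg)                      # class defined above
image = cv2.imread("input.jpg")                    # BGR, as run_on_image expects
predictions, vis_output = demo.run_on_image(image)
cv2.imwrite("output.jpg", vis_output.get_image()[:, :, ::-1])   # VisImage is RGB; flip back to BGR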
- Because rendering the visualization takes considerably amount of time, - this helps improve throughput a little bit when rendering videos. - """ - - class _StopToken: - pass - - class _PredictWorker(mp.Process): - def __init__(self, cfg, task_queue, result_queue): - self.cfg = cfg - self.task_queue = task_queue - self.result_queue = result_queue - super().__init__() - - def run(self): - predictor = DefaultPredictor(self.cfg) - - while True: - task = self.task_queue.get() - if isinstance(task, AsyncPredictor._StopToken): - break - idx, data = task - result = predictor(data) - self.result_queue.put((idx, result)) - - def __init__(self, cfg, num_gpus: int = 1): - """ - Args: - cfg (CfgNode): - num_gpus (int): if 0, will run on CPU - """ - num_workers = max(num_gpus, 1) - self.task_queue = mp.Queue(maxsize=num_workers * 3) - self.result_queue = mp.Queue(maxsize=num_workers * 3) - self.procs = [] - for gpuid in range(max(num_gpus, 1)): - cfg = cfg.clone() - cfg.defrost() - cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu" - self.procs.append( - AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue) - ) - - self.put_idx = 0 - self.get_idx = 0 - self.result_rank = [] - self.result_data = [] - - for p in self.procs: - p.start() - atexit.register(self.shutdown) - - def put(self, image): - self.put_idx += 1 - self.task_queue.put((self.put_idx, image)) - - def get(self): - self.get_idx += 1 # the index needed for this request - if len(self.result_rank) and self.result_rank[0] == self.get_idx: - res = self.result_data[0] - del self.result_data[0], self.result_rank[0] - return res - - while True: - # make sure the results are returned in the correct order - idx, res = self.result_queue.get() - if idx == self.get_idx: - return res - insert = bisect.bisect(self.result_rank, idx) - self.result_rank.insert(insert, idx) - self.result_data.insert(insert, res) - - def __len__(self): - return self.put_idx - self.get_idx - - def __call__(self, image): - self.put(image) - return self.get() - - def shutdown(self): - for _ in self.procs: - self.task_queue.put(AsyncPredictor._StopToken()) - - @property - def default_buffer_size(self): - return len(self.procs) * 5 diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/meta_arch/semantic_seg.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/meta_arch/semantic_seg.py deleted file mode 100644 index fefbecfb4f9ca84c4cf62c246cdcbf946016f0e6..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/meta_arch/semantic_seg.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import numpy as np -from typing import Callable, Dict, Optional, Tuple, Union -import fvcore.nn.weight_init as weight_init -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.layers import Conv2d, ShapeSpec, get_norm -from detectron2.structures import ImageList -from detectron2.utils.registry import Registry - -from ..backbone import Backbone, build_backbone -from ..postprocessing import sem_seg_postprocess -from .build import META_ARCH_REGISTRY - -__all__ = [ - "SemanticSegmentor", - "SEM_SEG_HEADS_REGISTRY", - "SemSegFPNHead", - "build_sem_seg_head", -] - - -SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS") -SEM_SEG_HEADS_REGISTRY.__doc__ = """ -Registry for semantic segmentation heads, which make semantic segmentation predictions -from feature maps. 
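# A hedged sketch, not part of semantic_seg.py: SEM_SEG_HEADS_REGISTRY lets
# cfg.MODEL.SEM_SEG_HEAD.NAME select the head class by string. Registering a
# class and resolving it by name works as below; "MyHead" is a made-up name for
# the example, and a real head must implement forward(features, targets).
from torch import nn

@SEM_SEG_HEADS_REGISTRY.register()
class MyHead(nn.Module):
    def __init__(self, cfg, input_shape):
        super().__init__()
        self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE

head_cls = SEM_SEG_HEADS_REGISTRY.get("MyHead")   # the same lookup build_sem_seg_head() performs
assert head_cls is MyHead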
-""" - - -@META_ARCH_REGISTRY.register() -class SemanticSegmentor(nn.Module): - """ - Main class for semantic segmentation architectures. - """ - - @configurable - def __init__( - self, - *, - backbone: Backbone, - sem_seg_head: nn.Module, - pixel_mean: Tuple[float], - pixel_std: Tuple[float], - ): - """ - Args: - backbone: a backbone module, must follow detectron2's backbone interface - sem_seg_head: a module that predicts semantic segmentation from backbone features - pixel_mean, pixel_std: list or tuple with #channels element, representing - the per-channel mean and std to be used to normalize the input image - """ - super().__init__() - self.backbone = backbone - self.sem_seg_head = sem_seg_head - self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) - self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) - - @classmethod - def from_config(cls, cfg): - backbone = build_backbone(cfg) - sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) - return { - "backbone": backbone, - "sem_seg_head": sem_seg_head, - "pixel_mean": cfg.MODEL.PIXEL_MEAN, - "pixel_std": cfg.MODEL.PIXEL_STD, - } - - @property - def device(self): - return self.pixel_mean.device - - def forward(self, batched_inputs): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper`. - Each item in the list contains the inputs for one image. - - For now, each item in the list is a dict that contains: - - * "image": Tensor, image in (C, H, W) format. - * "sem_seg": semantic segmentation ground truth - * Other information that's included in the original dicts, such as: - "height", "width" (int): the output resolution of the model (may be different - from input resolution), used in inference. - - - Returns: - list[dict]: - Each dict is the output for one input image. - The dict contains one key "sem_seg" whose value is a - Tensor that represents the - per-pixel segmentation prediced by the head. - The prediction has shape KxHxW that represents the logits of - each class for each pixel. - """ - images = [x["image"].to(self.device) for x in batched_inputs] - images = [(x - self.pixel_mean) / self.pixel_std for x in images] - images = ImageList.from_tensors( - images, - self.backbone.size_divisibility, - padding_constraints=self.backbone.padding_constraints, - ) - - features = self.backbone(images.tensor) - - if "sem_seg" in batched_inputs[0]: - targets = [x["sem_seg"].to(self.device) for x in batched_inputs] - targets = ImageList.from_tensors( - targets, - self.backbone.size_divisibility, - self.sem_seg_head.ignore_value, - self.backbone.padding_constraints, - ).tensor - else: - targets = None - results, losses = self.sem_seg_head(features, targets) - - if self.training: - return losses - - processed_results = [] - for result, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - r = sem_seg_postprocess(result, image_size, height, width) - processed_results.append({"sem_seg": r}) - return processed_results - - -def build_sem_seg_head(cfg, input_shape): - """ - Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`. - """ - name = cfg.MODEL.SEM_SEG_HEAD.NAME - return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) - - -@SEM_SEG_HEADS_REGISTRY.register() -class SemSegFPNHead(nn.Module): - """ - A semantic segmentation head described in :paper:`PanopticFPN`. 
- It takes a list of FPN features as input, and applies a sequence of - 3x3 convs and upsampling to scale all of them to the stride defined by - ``common_stride``. Then these features are added and used to make final - predictions by another 1x1 conv layer. - """ - - @configurable - def __init__( - self, - input_shape: Dict[str, ShapeSpec], - *, - num_classes: int, - conv_dims: int, - common_stride: int, - loss_weight: float = 1.0, - norm: Optional[Union[str, Callable]] = None, - ignore_value: int = -1, - ): - """ - NOTE: this interface is experimental. - - Args: - input_shape: shapes (channels and stride) of the input features - num_classes: number of classes to predict - conv_dims: number of output channels for the intermediate conv layers. - common_stride: the common stride that all features will be upscaled to - loss_weight: loss weight - norm (str or callable): normalization for all conv layers - ignore_value: category id to be ignored during training. - """ - super().__init__() - input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) - if not len(input_shape): - raise ValueError("SemSegFPNHead(input_shape=) cannot be empty!") - self.in_features = [k for k, v in input_shape] - feature_strides = [v.stride for k, v in input_shape] - feature_channels = [v.channels for k, v in input_shape] - - self.ignore_value = ignore_value - self.common_stride = common_stride - self.loss_weight = loss_weight - - self.scale_heads = [] - for in_feature, stride, channels in zip( - self.in_features, feature_strides, feature_channels - ): - head_ops = [] - head_length = max(1, int(np.log2(stride) - np.log2(self.common_stride))) - for k in range(head_length): - norm_module = get_norm(norm, conv_dims) - conv = Conv2d( - channels if k == 0 else conv_dims, - conv_dims, - kernel_size=3, - stride=1, - padding=1, - bias=not norm, - norm=norm_module, - activation=F.relu, - ) - weight_init.c2_msra_fill(conv) - head_ops.append(conv) - if stride != self.common_stride: - head_ops.append( - nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) - ) - self.scale_heads.append(nn.Sequential(*head_ops)) - self.add_module(in_feature, self.scale_heads[-1]) - self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) - weight_init.c2_msra_fill(self.predictor) - - @classmethod - def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): - return { - "input_shape": { - k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES - }, - "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, - "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, - "conv_dims": cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM, - "common_stride": cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE, - "norm": cfg.MODEL.SEM_SEG_HEAD.NORM, - "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, - } - - def forward(self, features, targets=None): - """ - Returns: - In training, returns (None, dict of losses) - In inference, returns (CxHxW logits, {}) - """ - x = self.layers(features) - if self.training: - return None, self.losses(x, targets) - else: - x = F.interpolate( - x, scale_factor=self.common_stride, mode="bilinear", align_corners=False - ) - return x, {} - - def layers(self, features): - for i, f in enumerate(self.in_features): - if i == 0: - x = self.scale_heads[i](features[f]) - else: - x = x + self.scale_heads[i](features[f]) - x = self.predictor(x) - return x - - def losses(self, predictions, targets): - predictions = predictions.float() # https://github.com/pytorch/pytorch/issues/48163 - predictions = 
F.interpolate( - predictions, - scale_factor=self.common_stride, - mode="bilinear", - align_corners=False, - ) - loss = F.cross_entropy( - predictions, targets, reduction="mean", ignore_index=self.ignore_value - ) - losses = {"loss_sem_seg": loss * self.loss_weight} - return losses diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/tests/structures/test_rotated_boxes.py b/spaces/brjathu/HMR2.0/vendor/detectron2/tests/structures/test_rotated_boxes.py deleted file mode 100644 index 478f034a4b8e1b48a1ace5c0a4823ecdf15c8536..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/tests/structures/test_rotated_boxes.py +++ /dev/null @@ -1,441 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from __future__ import absolute_import, division, print_function, unicode_literals -import logging -import math -import random -import unittest -import torch -from fvcore.common.benchmark import benchmark - -from detectron2.layers.rotated_boxes import pairwise_iou_rotated -from detectron2.structures.boxes import Boxes -from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou -from detectron2.utils.testing import reload_script_model - -logger = logging.getLogger(__name__) - - -class TestRotatedBoxesLayer(unittest.TestCase): - def test_iou_0_dim_cpu(self): - boxes1 = torch.rand(0, 5, dtype=torch.float32) - boxes2 = torch.rand(10, 5, dtype=torch.float32) - expected_ious = torch.zeros(0, 10, dtype=torch.float32) - ious = pairwise_iou_rotated(boxes1, boxes2) - self.assertTrue(torch.allclose(ious, expected_ious)) - - boxes1 = torch.rand(10, 5, dtype=torch.float32) - boxes2 = torch.rand(0, 5, dtype=torch.float32) - expected_ious = torch.zeros(10, 0, dtype=torch.float32) - ious = pairwise_iou_rotated(boxes1, boxes2) - self.assertTrue(torch.allclose(ious, expected_ious)) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_iou_0_dim_cuda(self): - boxes1 = torch.rand(0, 5, dtype=torch.float32) - boxes2 = torch.rand(10, 5, dtype=torch.float32) - expected_ious = torch.zeros(0, 10, dtype=torch.float32) - ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) - self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious)) - - boxes1 = torch.rand(10, 5, dtype=torch.float32) - boxes2 = torch.rand(0, 5, dtype=torch.float32) - expected_ious = torch.zeros(10, 0, dtype=torch.float32) - ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) - self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious)) - - def test_iou_half_overlap_cpu(self): - boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32) - boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32) - expected_ious = torch.tensor([[0.5]], dtype=torch.float32) - ious = pairwise_iou_rotated(boxes1, boxes2) - self.assertTrue(torch.allclose(ious, expected_ious)) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_iou_half_overlap_cuda(self): - boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32) - boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32) - expected_ious = torch.tensor([[0.5]], dtype=torch.float32) - ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) - self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious)) - - def test_iou_precision(self): - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - boxes1 = torch.tensor([[565, 565, 10, 10.0, 0]], dtype=torch.float32, device=device) - boxes2 = 
torch.tensor([[565, 565, 10, 8.3, 0]], dtype=torch.float32, device=device) - iou = 8.3 / 10.0 - expected_ious = torch.tensor([[iou]], dtype=torch.float32) - ious = pairwise_iou_rotated(boxes1, boxes2) - self.assertTrue(torch.allclose(ious.cpu(), expected_ious)) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_iou_too_many_boxes_cuda(self): - s1, s2 = 5, 1289035 - boxes1 = torch.zeros(s1, 5) - boxes2 = torch.zeros(s2, 5) - ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) - self.assertTupleEqual(tuple(ious_cuda.shape), (s1, s2)) - - def test_iou_extreme(self): - # Cause floating point issues in cuda kernels (#1266) - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device) - boxes2 = torch.tensor( - [ - [ - -1.117407639806935e17, - 1.3858420478349148e18, - 1000.0000610351562, - 1000.0000610351562, - 1612.0, - ] - ], - device=device, - ) - ious = pairwise_iou_rotated(boxes1, boxes2) - self.assertTrue(ious.min() >= 0, ious) - - def test_iou_issue_2154(self): - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - boxes1 = torch.tensor( - [ - [ - 296.6620178222656, - 458.73883056640625, - 23.515729904174805, - 47.677001953125, - 0.08795166015625, - ] - ], - device=device, - ) - boxes2 = torch.tensor( - [[296.66201, 458.73882000000003, 23.51573, 47.67702, 0.087951]], - device=device, - ) - ious = pairwise_iou_rotated(boxes1, boxes2) - expected_ious = torch.tensor([[1.0]], dtype=torch.float32) - self.assertTrue(torch.allclose(ious.cpu(), expected_ious)) - - def test_iou_issue_2167(self): - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - boxes1 = torch.tensor( - [ - [ - 2563.74462890625000000000, - 1436.79016113281250000000, - 2174.70336914062500000000, - 214.09500122070312500000, - 115.11834716796875000000, - ] - ], - device=device, - ) - boxes2 = torch.tensor( - [ - [ - 2563.74462890625000000000, - 1436.79028320312500000000, - 2174.70288085937500000000, - 214.09495544433593750000, - 115.11835479736328125000, - ] - ], - device=device, - ) - ious = pairwise_iou_rotated(boxes1, boxes2) - expected_ious = torch.tensor([[1.0]], dtype=torch.float32) - self.assertTrue(torch.allclose(ious.cpu(), expected_ious)) - - -class TestRotatedBoxesStructure(unittest.TestCase): - def test_clip_area_0_degree(self): - for _ in range(50): - num_boxes = 100 - boxes_5d = torch.zeros(num_boxes, 5) - boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) - boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) - boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) - boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) - # Convert from (x_ctr, y_ctr, w, h, 0) to (x1, y1, x2, y2) - boxes_4d = torch.zeros(num_boxes, 4) - boxes_4d[:, 0] = boxes_5d[:, 0] - boxes_5d[:, 2] / 2.0 - boxes_4d[:, 1] = boxes_5d[:, 1] - boxes_5d[:, 3] / 2.0 - boxes_4d[:, 2] = boxes_5d[:, 0] + boxes_5d[:, 2] / 2.0 - boxes_4d[:, 3] = boxes_5d[:, 1] + boxes_5d[:, 3] / 2.0 - - image_size = (500, 600) - test_boxes_4d = Boxes(boxes_4d) - test_boxes_5d = RotatedBoxes(boxes_5d) - # Before clip - areas_4d = test_boxes_4d.area() - areas_5d = test_boxes_5d.area() - self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5)) - # After clip - test_boxes_4d.clip(image_size) - test_boxes_5d.clip(image_size) - areas_4d = test_boxes_4d.area() - areas_5d = test_boxes_5d.area() - self.assertTrue(torch.allclose(areas_4d, 
areas_5d, atol=1e-1, rtol=1e-5)) - - def test_clip_area_arbitrary_angle(self): - num_boxes = 100 - boxes_5d = torch.zeros(num_boxes, 5) - boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) - boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) - boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) - boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) - boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) - clip_angle_threshold = random.uniform(0, 180) - - image_size = (500, 600) - test_boxes_5d = RotatedBoxes(boxes_5d) - # Before clip - areas_before = test_boxes_5d.area() - # After clip - test_boxes_5d.clip(image_size, clip_angle_threshold) - areas_diff = test_boxes_5d.area() - areas_before - - # the areas should only decrease after clipping - self.assertTrue(torch.all(areas_diff <= 0)) - # whenever the box is clipped (thus the area shrinks), - # the angle for the box must be within the clip_angle_threshold - # Note that the clip function will normalize the angle range - # to be within (-180, 180] - - self.assertTrue( - torch.all( - torch.abs(test_boxes_5d.tensor[:, 4][torch.where(areas_diff < 0)]) - < clip_angle_threshold - ) - ) - - def test_normalize_angles(self): - # torch.manual_seed(0) - for _ in range(50): - num_boxes = 100 - boxes_5d = torch.zeros(num_boxes, 5) - boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) - boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) - boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) - boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) - boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) - rotated_boxes = RotatedBoxes(boxes_5d) - normalized_boxes = rotated_boxes.clone() - normalized_boxes.normalize_angles() - self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] >= -180)) - self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] < 180)) - # x, y, w, h should not change - self.assertTrue(torch.allclose(boxes_5d[:, :4], normalized_boxes.tensor[:, :4])) - # the cos/sin values of the angles should stay the same - - self.assertTrue( - torch.allclose( - torch.cos(boxes_5d[:, 4] * math.pi / 180), - torch.cos(normalized_boxes.tensor[:, 4] * math.pi / 180), - atol=1e-5, - ) - ) - - self.assertTrue( - torch.allclose( - torch.sin(boxes_5d[:, 4] * math.pi / 180), - torch.sin(normalized_boxes.tensor[:, 4] * math.pi / 180), - atol=1e-5, - ) - ) - - def test_pairwise_iou_0_degree(self): - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - boxes1 = torch.tensor( - [[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]], - dtype=torch.float32, - device=device, - ) - boxes2 = torch.tensor( - [ - [0.5, 0.5, 1.0, 1.0, 0.0], - [0.25, 0.5, 0.5, 1.0, 0.0], - [0.5, 0.25, 1.0, 0.5, 0.0], - [0.25, 0.25, 0.5, 0.5, 0.0], - [0.75, 0.75, 0.5, 0.5, 0.0], - [1.0, 1.0, 1.0, 1.0, 0.0], - ], - dtype=torch.float32, - device=device, - ) - expected_ious = torch.tensor( - [ - [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], - [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], - ], - dtype=torch.float32, - device=device, - ) - ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) - self.assertTrue(torch.allclose(ious, expected_ious)) - - def test_pairwise_iou_45_degrees(self): - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - boxes1 = torch.tensor( - [ - [1, 1, math.sqrt(2), math.sqrt(2), 45], - [1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45], - ], - dtype=torch.float32, - device=device, - ) - 
boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32, device=device) - expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32, device=device) - ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) - self.assertTrue(torch.allclose(ious, expected_ious)) - - def test_pairwise_iou_orthogonal(self): - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - boxes1 = torch.tensor([[5, 5, 10, 6, 55]], dtype=torch.float32, device=device) - boxes2 = torch.tensor([[5, 5, 10, 6, -35]], dtype=torch.float32, device=device) - iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0) - expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) - ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) - self.assertTrue(torch.allclose(ious, expected_ious)) - - def test_pairwise_iou_large_close_boxes(self): - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - boxes1 = torch.tensor( - [[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]], - dtype=torch.float32, - device=device, - ) - boxes2 = torch.tensor( - [[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]], - dtype=torch.float32, - device=device, - ) - iou = 364.259155 / 364.259186 - expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) - ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) - self.assertTrue(torch.allclose(ious, expected_ious)) - - def test_pairwise_iou_many_boxes(self): - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - num_boxes1 = 100 - num_boxes2 = 200 - boxes1 = torch.stack( - [ - torch.tensor( - [5 + 20 * i, 5 + 20 * i, 10, 10, 0], - dtype=torch.float32, - device=device, - ) - for i in range(num_boxes1) - ] - ) - boxes2 = torch.stack( - [ - torch.tensor( - [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], - dtype=torch.float32, - device=device, - ) - for i in range(num_boxes2) - ] - ) - expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32, device=device) - for i in range(min(num_boxes1, num_boxes2)): - expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0 - ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) - self.assertTrue(torch.allclose(ious, expected_ious)) - - def test_pairwise_iou_issue1207_simplified(self): - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - # Simplified test case of D2-issue-1207 - boxes1 = torch.tensor([[3, 3, 8, 2, -45.0]], device=device) - boxes2 = torch.tensor([[6, 0, 8, 2, -45.0]], device=device) - iou = 0.0 - expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) - - ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) - self.assertTrue(torch.allclose(ious, expected_ious)) - - def test_pairwise_iou_issue1207(self): - for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): - # The original test case in D2-issue-1207 - boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device) - boxes2 = torch.tensor([[190.0, 127.0, 80.0, 21.0, -46.0]], device=device) - - iou = 0.0 - expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) - - ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) - self.assertTrue(torch.allclose(ious, expected_ious)) - - def test_empty_cat(self): - x = RotatedBoxes.cat([]) - self.assertTrue(x.tensor.shape, (0, 5)) - - def test_scriptability(self): - def func(x): - boxes = RotatedBoxes(x) - test = boxes.to(torch.device("cpu")).tensor - return boxes.area(), test - - f = 
torch.jit.script(func) - f = reload_script_model(f) - f(torch.rand((3, 5))) - - data = torch.rand((3, 5)) - - def func_cat(x: torch.Tensor): - boxes1 = RotatedBoxes(x) - boxes2 = RotatedBoxes(x) - # this is not supported by torchscript for now. - # boxes3 = RotatedBoxes.cat([boxes1, boxes2]) - boxes3 = boxes1.cat([boxes1, boxes2]) - return boxes3 - - f = torch.jit.script(func_cat) - script_box = f(data) - self.assertTrue(torch.equal(torch.cat([data, data]), script_box.tensor)) - - -def benchmark_rotated_iou(): - num_boxes1 = 200 - num_boxes2 = 500 - boxes1 = torch.stack( - [ - torch.tensor([5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32) - for i in range(num_boxes1) - ] - ) - boxes2 = torch.stack( - [ - torch.tensor( - [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], - dtype=torch.float32, - ) - for i in range(num_boxes2) - ] - ) - - def func(dev, n=1): - b1 = boxes1.to(device=dev) - b2 = boxes2.to(device=dev) - - def bench(): - for _ in range(n): - pairwise_iou_rotated(b1, b2) - if dev.type == "cuda": - torch.cuda.synchronize() - - return bench - - # only run it once per timed loop, since it's slow - args = [{"dev": torch.device("cpu"), "n": 1}] - if torch.cuda.is_available(): - args.append({"dev": torch.device("cuda"), "n": 10}) - - benchmark(func, "rotated_iou", args, warmup_iters=3) - - -if __name__ == "__main__": - unittest.main() - benchmark_rotated_iou() diff --git a/spaces/candlend/vits-hoshimi/sovits/preprocess_hubert_f0.py b/spaces/candlend/vits-hoshimi/sovits/preprocess_hubert_f0.py deleted file mode 100644 index 4fe7f21541acb01537797f430d53b3c0e63279e1..0000000000000000000000000000000000000000 --- a/spaces/candlend/vits-hoshimi/sovits/preprocess_hubert_f0.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -import argparse - -import torch -import json -from glob import glob - -from pyworld import pyworld -from tqdm import tqdm -from scipy.io import wavfile - -import utils -from mel_processing import mel_spectrogram_torch -#import h5py -import logging -logging.getLogger('numba').setLevel(logging.WARNING) - -import parselmouth -import librosa -import numpy as np - - -def get_f0(path,p_len=None, f0_up_key=0): - x, _ = librosa.load(path, 32000) - if p_len is None: - p_len = x.shape[0]//320 - else: - assert abs(p_len-x.shape[0]//320) < 3, (path, p_len, x.shape) - time_step = 320 / 32000 * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = parselmouth.Sound(x, 32000).to_pitch_ac( - time_step=time_step / 1000, voicing_threshold=0.6, - pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] - - pad_size=(p_len - len(f0) + 1) // 2 - if(pad_size>0 or p_len - len(f0) - pad_size>0): - f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant') - - f0bak = f0.copy() - f0 *= pow(2, f0_up_key / 12) - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak - -def resize2d(x, target_len): - source = np.array(x) - source[source<0.001] = np.nan - target = np.interp(np.arange(0, len(source)*target_len, len(source))/ target_len, np.arange(0, len(source)), source) - res = np.nan_to_num(target) - return res - -def compute_f0(path, c_len): - x, sr = librosa.load(path, sr=32000) - f0, t = pyworld.dio( - x.astype(np.double), - fs=sr, - f0_ceil=800, - frame_period=1000 * 320 / sr, - 
) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, 32000) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - assert abs(c_len - x.shape[0]//320) < 3, (c_len, f0.shape) - - return None, resize2d(f0, c_len) - - -def process(filename): - print(filename) - save_name = filename+".soft.pt" - if not os.path.exists(save_name): - devive = torch.device("cuda" if torch.cuda.is_available() else "cpu") - wav, _ = librosa.load(filename, sr=16000) - wav = torch.from_numpy(wav).unsqueeze(0).to(devive) - c = utils.get_hubert_content(hmodel, wav) - torch.save(c.cpu(), save_name) - else: - c = torch.load(save_name) - f0path = filename+".f0.npy" - if not os.path.exists(f0path): - cf0, f0 = compute_f0(filename, c.shape[-1] * 2) - np.save(f0path, f0) - - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--in_dir", type=str, default="dataset/32k", help="path to input dir") - args = parser.parse_args() - - print("Loading hubert for content...") - hmodel = utils.get_hubert_model(0 if torch.cuda.is_available() else None) - print("Loaded hubert.") - - filenames = glob(f'{args.in_dir}/*/*.wav', recursive=True)#[:10] - - for filename in tqdm(filenames): - process(filename) - \ No newline at end of file diff --git a/spaces/cbhasker/bhasker1323genAIApp/app.py b/spaces/cbhasker/bhasker1323genAIApp/app.py deleted file mode 100644 index 2dbf3ae89c2e3fdab7134107dd346f984dca8eb1..0000000000000000000000000000000000000000 --- a/spaces/cbhasker/bhasker1323genAIApp/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """Meet Riya, your youthful and witty personal assistant! At 21 years old, she's full of energy and always eager to help. Riya's goal is to assist you with any questions or problems you might have. Her enthusiasm shines through in every response, making interactions with her enjoyable and engaging. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. 
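The deleted `app.py` above wires a `ConversationBufferMemory`-backed `LLMChain` into `gr.ChatInterface`. A minimal sketch of the same pattern exercised without the UI (assuming `OPENAI_API_KEY` is exported and a LangChain version that still ships `LLMChain` and `ConversationBufferMemory`; the prompt text below is illustrative, not the app's original persona prompt):

    import os
    from langchain.chat_models import ChatOpenAI
    from langchain import LLMChain, PromptTemplate
    from langchain.memory import ConversationBufferMemory

    # Same components as the deleted app; temperature passed as a float
    # (the app passed the string '0.5', which ChatOpenAI tolerates but is untidy).
    llm = ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo")

    prompt = PromptTemplate(
        input_variables=["chat_history", "user_message"],
        template="{chat_history}\nUser: {user_message}\nChatbot:",
    )
    memory = ConversationBufferMemory(memory_key="chat_history")
    chain = LLMChain(llm=llm, prompt=prompt, memory=memory)

    # memory_key="chat_history" feeds prior turns back into {chat_history}
    # on every call, which is what gives the chatbot its conversational state.
    print(chain.predict(user_message="Hi, who are you?"))
    print(chain.predict(user_message="What did I just ask you?"))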
diff --git a/spaces/cesar/autotexto/README.md b/spaces/cesar/autotexto/README.md deleted file mode 100644 index 48571a20ac5ad0d016271997f28b43d30ec5a173..0000000000000000000000000000000000000000 --- a/spaces/cesar/autotexto/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Autotexto -emoji: 🐠 -colorFrom: pink -colorTo: gray -sdk: streamlit -sdk_version: 1.2.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/language-modeling/run_mlm.py b/spaces/chendl/compositional_test/transformers/examples/pytorch/language-modeling/run_mlm.py deleted file mode 100644 index ab7a2cc42775a3bb4e441e9b0d8b98efcea56889..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/language-modeling/run_mlm.py +++ /dev/null @@ -1,659 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2020 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) on a text file or a dataset. - -Here is the full list of checkpoints on the hub that can be fine-tuned by this script: -https://huggingface.co/models?filter=fill-mask -""" -# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. - -import logging -import math -import os -import sys -from dataclasses import dataclass, field -from itertools import chain -from typing import Optional - -import datasets -import evaluate -from datasets import load_dataset - -import transformers -from transformers import ( - CONFIG_MAPPING, - MODEL_FOR_MASKED_LM_MAPPING, - AutoConfig, - AutoModelForMaskedLM, - AutoTokenizer, - DataCollatorForLanguageModeling, - HfArgumentParser, - Trainer, - TrainingArguments, - is_torch_tpu_available, - set_seed, -) -from transformers.trainer_utils import get_last_checkpoint -from transformers.utils import check_min_version, send_example_telemetry -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.28.0") - -require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") - -logger = logging.getLogger(__name__) -MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - model_name_or_path: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." 
- ) - }, - ) - model_type: Optional[str] = field( - default=None, - metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, - ) - config_overrides: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Override some existing default config settings when a model is trained from scratch. Example: " - "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" - ) - }, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - low_cpu_mem_usage: bool = field( - default=False, - metadata={ - "help": ( - "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded." - "set True will benefit LLM loading time and RAM consumption." - ) - }, - ) - - def __post_init__(self): - if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): - raise ValueError( - "--config_overrides can't be used in combination with --config_name or --model_name_or_path" - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - max_seq_length: Optional[int] = field( - default=None, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated." 
- ) - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - mlm_probability: float = field( - default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} - ) - line_by_line: bool = field( - default=False, - metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) - - def __post_init__(self): - if self.streaming: - require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`") - - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - if extension not in ["csv", "json", "txt"]: - raise ValueError("`train_file` should be a csv, a json or a txt file.") - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - if extension not in ["csv", "json", "txt"]: - raise ValueError("`validation_file` should be a csv, a json or a txt file.") - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The - # information sent is the one passed as arguments along with your Python/PyTorch versions. - send_example_telemetry("run_mlm", model_args, data_args) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - - if training_args.should_log: - # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
- transformers.utils.logging.set_verbosity_info() - - log_level = training_args.get_process_log_level() - logger.setLevel(log_level) - datasets.utils.logging.set_verbosity(log_level) - transformers.utils.logging.set_verbosity(log_level) - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - # Set the verbosity to info of the Transformers logger (on main process only): - logger.info(f"Training/evaluation parameters {training_args}") - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub - # - # For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this - # behavior (see below) - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. 
- raw_datasets = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - raw_datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - streaming=data_args.streaming, - ) - else: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - extension = data_args.train_file.split(".")[-1] - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = data_args.validation_file.split(".")[-1] - if extension == "txt": - extension = "text" - raw_datasets = load_dataset( - extension, - data_files=data_files, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - - # If no validation data is there, validation_split_percentage will be used to divide the dataset. - if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - extension, - data_files=data_files, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - raw_datasets["train"] = load_dataset( - extension, - data_files=data_files, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- config_kwargs = { - "cache_dir": model_args.cache_dir, - "revision": model_args.model_revision, - "use_auth_token": True if model_args.use_auth_token else None, - } - if model_args.config_name: - config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) - elif model_args.model_name_or_path: - config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) - else: - config = CONFIG_MAPPING[model_args.model_type]() - logger.warning("You are instantiating a new config instance from scratch.") - if model_args.config_overrides is not None: - logger.info(f"Overriding config: {model_args.config_overrides}") - config.update_from_string(model_args.config_overrides) - logger.info(f"New config: {config}") - - tokenizer_kwargs = { - "cache_dir": model_args.cache_dir, - "use_fast": model_args.use_fast_tokenizer, - "revision": model_args.model_revision, - "use_auth_token": True if model_args.use_auth_token else None, - } - if model_args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) - elif model_args.model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) - else: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script." - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - if model_args.model_name_or_path: - model = AutoModelForMaskedLM.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - low_cpu_mem_usage=model_args.low_cpu_mem_usage, - ) - else: - logger.info("Training new model from scratch") - model = AutoModelForMaskedLM.from_config(config) - - # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch - # on a small vocab and want a smaller embedding size, remove this test. - embedding_size = model.get_input_embeddings().weight.shape[0] - if len(tokenizer) > embedding_size: - model.resize_token_embeddings(len(tokenizer)) - - # Preprocessing the datasets. - # First we tokenize all the texts. - if training_args.do_train: - column_names = list(raw_datasets["train"].features) - else: - column_names = list(raw_datasets["validation"].features) - text_column_name = "text" if "text" in column_names else column_names[0] - - if data_args.max_seq_length is None: - max_seq_length = tokenizer.model_max_length - if max_seq_length > 1024: - logger.warning( - "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" - " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" - " override this default with `--block_size xxx`." - ) - max_seq_length = 1024 - else: - if data_args.max_seq_length > tokenizer.model_max_length: - logger.warning( - f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" - f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." - ) - max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) - - if data_args.line_by_line: - # When using line_by_line, we just tokenize each nonempty line. 
- padding = "max_length" if data_args.pad_to_max_length else False - - def tokenize_function(examples): - # Remove empty lines - examples[text_column_name] = [ - line for line in examples[text_column_name] if len(line) > 0 and not line.isspace() - ] - return tokenizer( - examples[text_column_name], - padding=padding, - truncation=True, - max_length=max_seq_length, - # We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it - # receives the `special_tokens_mask`. - return_special_tokens_mask=True, - ) - - with training_args.main_process_first(desc="dataset map tokenization"): - if not data_args.streaming: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=[text_column_name], - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on dataset line_by_line", - ) - else: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - remove_columns=[text_column_name], - ) - else: - # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts. - # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more - # efficient when it receives the `special_tokens_mask`. - def tokenize_function(examples): - return tokenizer(examples[text_column_name], return_special_tokens_mask=True) - - with training_args.main_process_first(desc="dataset map tokenization"): - if not data_args.streaming: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on every text in dataset", - ) - else: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - remove_columns=column_names, - ) - - # Main data processing function that will concatenate all texts from our dataset and generate chunks of - # max_seq_length. - def group_texts(examples): - # Concatenate all texts. - concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} - total_length = len(concatenated_examples[list(examples.keys())[0]]) - # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can - # customize this part to your needs. - if total_length >= max_seq_length: - total_length = (total_length // max_seq_length) * max_seq_length - # Split by chunks of max_len. - result = { - k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)] - for k, t in concatenated_examples.items() - } - return result - - # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a - # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value - # might be slower to preprocess. - # - # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map - - with training_args.main_process_first(desc="grouping texts together"): - if not data_args.streaming: - tokenized_datasets = tokenized_datasets.map( - group_texts, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - desc=f"Grouping texts in chunks of {max_seq_length}", - ) - else: - tokenized_datasets = tokenized_datasets.map( - group_texts, - batched=True, - ) - - if training_args.do_train: - if "train" not in tokenized_datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = tokenized_datasets["train"] - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - - if training_args.do_eval: - if "validation" not in tokenized_datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = tokenized_datasets["validation"] - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - - def preprocess_logits_for_metrics(logits, labels): - if isinstance(logits, tuple): - # Depending on the model and config, logits may contain extra tensors, - # like past_key_values, but logits always come first - logits = logits[0] - return logits.argmax(dim=-1) - - metric = evaluate.load("accuracy") - - def compute_metrics(eval_preds): - preds, labels = eval_preds - # preds have the same shape as the labels, after the argmax(-1) has been calculated - # by preprocess_logits_for_metrics - labels = labels.reshape(-1) - preds = preds.reshape(-1) - mask = labels != -100 - labels = labels[mask] - preds = preds[mask] - return metric.compute(predictions=preds, references=labels) - - # Data collator - # This one will take care of randomly masking the tokens. 
- pad_to_multiple_of_8 = data_args.line_by_line and training_args.fp16 and not data_args.pad_to_max_length - data_collator = DataCollatorForLanguageModeling( - tokenizer=tokenizer, - mlm_probability=data_args.mlm_probability, - pad_to_multiple_of=8 if pad_to_multiple_of_8 else None, - ) - - # Initialize our Trainer - trainer = Trainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - tokenizer=tokenizer, - data_collator=data_collator, - compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None, - preprocess_logits_for_metrics=preprocess_logits_for_metrics - if training_args.do_eval and not is_torch_tpu_available() - else None, - ) - - # Training - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() # Saves the tokenizer too for easy upload - metrics = train_result.metrics - - max_train_samples = ( - data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - if training_args.do_eval: - logger.info("*** Evaluate ***") - - metrics = trainer.evaluate() - - max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - try: - perplexity = math.exp(metrics["eval_loss"]) - except OverflowError: - perplexity = float("inf") - metrics["perplexity"] = perplexity - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"} - if data_args.dataset_name is not None: - kwargs["dataset_tags"] = data_args.dataset_name - if data_args.dataset_config_name is not None: - kwargs["dataset_args"] = data_args.dataset_config_name - kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" - else: - kwargs["dataset"] = data_args.dataset_name - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/models/align/convert_align_tf_to_hf.py b/spaces/chendl/compositional_test/transformers/src/transformers/models/align/convert_align_tf_to_hf.py deleted file mode 100644 index fbf53844ab9c9cea67f4d576bb0bc865ebfb0dd8..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/src/transformers/models/align/convert_align_tf_to_hf.py +++ /dev/null @@ -1,387 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Convert ALIGN checkpoints from the original repository.""" - -import argparse -import os - -import align -import numpy as np -import requests -import tensorflow as tf -import torch -from PIL import Image -from tokenizer import Tokenizer - -from transformers import ( - AlignConfig, - AlignModel, - AlignProcessor, - BertConfig, - BertTokenizer, - EfficientNetConfig, - EfficientNetImageProcessor, -) -from transformers.utils import logging - - -logging.set_verbosity_info() -logger = logging.get_logger(__name__) - - -def preprocess(image): - image = tf.image.resize(image, (346, 346)) - image = tf.image.crop_to_bounding_box(image, (346 - 289) // 2, (346 - 289) // 2, 289, 289) - return image - - -def get_align_config(): - vision_config = EfficientNetConfig.from_pretrained("google/efficientnet-b7") - vision_config.image_size = 289 - vision_config.hidden_dim = 640 - vision_config.id2label = {"0": "LABEL_0", "1": "LABEL_1"} - vision_config.label2id = {"LABEL_0": 0, "LABEL_1": 1} - vision_config.depthwise_padding = [] - - text_config = BertConfig() - config = AlignConfig.from_text_vision_configs( - text_config=text_config, vision_config=vision_config, projection_dim=640 - ) - return config - - -# We will verify our results on an image of cute cats -def prepare_img(): - url = "http://images.cocodataset.org/val2017/000000039769.jpg" - im = Image.open(requests.get(url, stream=True).raw) - return im - - -def get_processor(): - image_processor = EfficientNetImageProcessor( - do_center_crop=True, - rescale_factor=1 / 127.5, - rescale_offset=True, - do_normalize=False, - include_top=False, - resample=Image.BILINEAR, - ) - tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") - tokenizer.model_max_length = 64 - processor = AlignProcessor(image_processor=image_processor, tokenizer=tokenizer) - return processor - - -# here we list all keys to be renamed (original name on the left, our name on the right) -def rename_keys(original_param_names): - # EfficientNet image encoder - block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")] - block_names = list(set(block_names)) - block_names = sorted(block_names) - num_blocks = len(block_names) - block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))} - - rename_keys = [] - rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight")) - rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight")) - rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias")) - rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean")) - rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var")) - - for b in block_names: - hf_b = block_name_mapping[b] - rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight")) - rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight")) - rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias")) - rename_keys.append( - 
(f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") - ) - rename_keys.append( - (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") - ) - rename_keys.append( - (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") - ) - rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight")) - rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias")) - rename_keys.append( - (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") - ) - rename_keys.append( - (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") - ) - - rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight")) - rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias")) - rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight")) - rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias")) - rename_keys.append( - (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight") - ) - rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight")) - rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias")) - rename_keys.append( - (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean") - ) - rename_keys.append( - (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var") - ) - - key_mapping = {} - for item in rename_keys: - if item[0] in original_param_names: - key_mapping[item[0]] = "vision_model." 
+ item[1] - - # BERT text encoder - rename_keys = [] - old = "tf_bert_model/bert" - new = "text_model" - for i in range(12): - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/attention/self/query/kernel:0", - f"{new}.encoder.layer.{i}.attention.self.query.weight", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/attention/self/query/bias:0", - f"{new}.encoder.layer.{i}.attention.self.query.bias", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/attention/self/key/kernel:0", - f"{new}.encoder.layer.{i}.attention.self.key.weight", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/attention/self/key/bias:0", - f"{new}.encoder.layer.{i}.attention.self.key.bias", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/attention/self/value/kernel:0", - f"{new}.encoder.layer.{i}.attention.self.value.weight", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/attention/self/value/bias:0", - f"{new}.encoder.layer.{i}.attention.self.value.bias", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/attention/output/dense/kernel:0", - f"{new}.encoder.layer.{i}.attention.output.dense.weight", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/attention/output/dense/bias:0", - f"{new}.encoder.layer.{i}.attention.output.dense.bias", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/attention/output/LayerNorm/gamma:0", - f"{new}.encoder.layer.{i}.attention.output.LayerNorm.weight", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/attention/output/LayerNorm/beta:0", - f"{new}.encoder.layer.{i}.attention.output.LayerNorm.bias", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/intermediate/dense/kernel:0", - f"{new}.encoder.layer.{i}.intermediate.dense.weight", - ) - ) - rename_keys.append( - ( - f"{old}/encoder/layer_._{i}/intermediate/dense/bias:0", - f"{new}.encoder.layer.{i}.intermediate.dense.bias", - ) - ) - rename_keys.append( - (f"{old}/encoder/layer_._{i}/output/dense/kernel:0", f"{new}.encoder.layer.{i}.output.dense.weight") - ) - rename_keys.append( - (f"{old}/encoder/layer_._{i}/output/dense/bias:0", f"{new}.encoder.layer.{i}.output.dense.bias") - ) - rename_keys.append( - (f"{old}/encoder/layer_._{i}/output/LayerNorm/gamma:0", f"{new}.encoder.layer.{i}.output.LayerNorm.weight") - ) - rename_keys.append( - (f"{old}/encoder/layer_._{i}/output/LayerNorm/beta:0", f"{new}.encoder.layer.{i}.output.LayerNorm.bias") - ) - - rename_keys.append((f"{old}/embeddings/word_embeddings/weight:0", f"{new}.embeddings.word_embeddings.weight")) - rename_keys.append( - (f"{old}/embeddings/position_embeddings/embeddings:0", f"{new}.embeddings.position_embeddings.weight") - ) - rename_keys.append( - (f"{old}/embeddings/token_type_embeddings/embeddings:0", f"{new}.embeddings.token_type_embeddings.weight") - ) - rename_keys.append((f"{old}/embeddings/LayerNorm/gamma:0", f"{new}.embeddings.LayerNorm.weight")) - rename_keys.append((f"{old}/embeddings/LayerNorm/beta:0", f"{new}.embeddings.LayerNorm.bias")) - - rename_keys.append((f"{old}/pooler/dense/kernel:0", f"{new}.pooler.dense.weight")) - rename_keys.append((f"{old}/pooler/dense/bias:0", f"{new}.pooler.dense.bias")) - rename_keys.append(("dense/kernel:0", "text_projection.weight")) - rename_keys.append(("dense/bias:0", "text_projection.bias")) - rename_keys.append(("dense/bias:0", "text_projection.bias")) - rename_keys.append(("temperature:0", "temperature")) - - for item in rename_keys: - if item[0] in original_param_names: - 
key_mapping[item[0]] = item[1] - return key_mapping - - -def replace_params(hf_params, tf_params, key_mapping): - list(hf_params.keys()) - - for key, value in tf_params.items(): - if key not in key_mapping: - continue - - hf_key = key_mapping[key] - if "_conv" in key and "kernel" in key: - new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1) - elif "embeddings" in key: - new_hf_value = torch.from_numpy(value) - elif "depthwise_kernel" in key: - new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1) - elif "kernel" in key: - new_hf_value = torch.from_numpy(np.transpose(value)) - elif "temperature" in key: - new_hf_value = value - elif "bn/gamma" or "bn/beta" in key: - new_hf_value = torch.from_numpy(np.transpose(value)).squeeze() - else: - new_hf_value = torch.from_numpy(value) - - # Replace HF parameters with original TF model parameters - hf_params[hf_key].copy_(new_hf_value) - - -@torch.no_grad() -def convert_align_checkpoint(checkpoint_path, pytorch_dump_folder_path, save_model, push_to_hub): - """ - Copy/paste/tweak model's weights to our ALIGN structure. - """ - # Load original model - seq_length = 64 - tok = Tokenizer(seq_length) - original_model = align.Align("efficientnet-b7", "bert-base", 640, seq_length, tok.get_vocab_size()) - original_model.compile() - original_model.load_weights(checkpoint_path) - - tf_params = original_model.trainable_variables - tf_non_train_params = original_model.non_trainable_variables - tf_params = {param.name: param.numpy() for param in tf_params} - for param in tf_non_train_params: - tf_params[param.name] = param.numpy() - tf_param_names = list(tf_params.keys()) - - # Load HuggingFace model - config = get_align_config() - hf_model = AlignModel(config).eval() - hf_params = hf_model.state_dict() - - # Create src-to-dst parameter name mapping dictionary - print("Converting parameters...") - key_mapping = rename_keys(tf_param_names) - replace_params(hf_params, tf_params, key_mapping) - - # Initialize processor - processor = get_processor() - inputs = processor( - images=prepare_img(), text="A picture of a cat", padding="max_length", max_length=64, return_tensors="pt" - ) - - # HF model inference - hf_model.eval() - with torch.no_grad(): - outputs = hf_model(**inputs) - - hf_image_features = outputs.image_embeds.detach().numpy() - hf_text_features = outputs.text_embeds.detach().numpy() - - # Original model inference - original_model.trainable = False - tf_image_processor = EfficientNetImageProcessor( - do_center_crop=True, - do_rescale=False, - do_normalize=False, - include_top=False, - resample=Image.BILINEAR, - ) - image = tf_image_processor(images=prepare_img(), return_tensors="tf", data_format="channels_last")["pixel_values"] - text = tok(tf.constant(["A picture of a cat"])) - - image_features = original_model.image_encoder(image, training=False) - text_features = original_model.text_encoder(text, training=False) - - image_features = tf.nn.l2_normalize(image_features, axis=-1) - text_features = tf.nn.l2_normalize(text_features, axis=-1) - - # Check whether original and HF model outputs match -> np.allclose - assert np.allclose(image_features, hf_image_features, atol=1e-3), "The predicted image features are not the same." - assert np.allclose(text_features, hf_text_features, atol=1e-3), "The predicted text features are not the same." 
- print("Model outputs match!") - - if save_model: - # Create folder to save model - if not os.path.isdir(pytorch_dump_folder_path): - os.mkdir(pytorch_dump_folder_path) - # Save converted model and feature extractor - hf_model.save_pretrained(pytorch_dump_folder_path) - processor.save_pretrained(pytorch_dump_folder_path) - - if push_to_hub: - # Push model and feature extractor to hub - print("Pushing converted ALIGN to the hub...") - processor.push_to_hub("align-base") - hf_model.push_to_hub("align-base") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - # Required parameters - parser.add_argument( - "--checkpoint_path", - default="./weights/model-weights", - type=str, - help="Path to the pretrained TF ALIGN checkpoint.", - ) - parser.add_argument( - "--pytorch_dump_folder_path", - default="hf_model", - type=str, - help="Path to the output PyTorch model directory.", - ) - parser.add_argument("--save_model", action="store_true", help="Save model to local") - parser.add_argument("--push_to_hub", action="store_true", help="Push model and feature extractor to the hub") - - args = parser.parse_args() - convert_align_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub) diff --git a/spaces/chronopt-research/ViTExCo/train_ddp.py b/spaces/chronopt-research/ViTExCo/train_ddp.py deleted file mode 100644 index 3fc255c50f7f7aad1667940e131e351a3e3d1ed7..0000000000000000000000000000000000000000 --- a/spaces/chronopt-research/ViTExCo/train_ddp.py +++ /dev/null @@ -1,637 +0,0 @@ -import os -import sys -import wandb -import argparse -import numpy as np -from tqdm import tqdm -from PIL import Image -from datetime import datetime -from zoneinfo import ZoneInfo -from time import gmtime, strftime -from collections import OrderedDict -import random - -import torch -import torch.nn as nn -import torch.optim as optim -import torch.backends.cudnn as cudnn -from torchvision.transforms import CenterCrop -from torch.utils.data import ConcatDataset, DataLoader, WeightedRandomSampler -import torchvision.transforms as torch_transforms -from torchvision.utils import make_grid - -from src.losses import ( - ContextualLoss, - ContextualLoss_forward, - Perceptual_loss, - consistent_loss_fn, - discriminator_loss_fn, - generator_loss_fn, - l1_loss_fn, - smoothness_loss_fn, -) -from src.models.CNN.GAN_models import Discriminator_x64 -from src.models.CNN.ColorVidNet import ColorVidNet -from src.models.CNN.FrameColor import frame_colorization -from src.models.CNN.NonlocalNet import WeightedAverage_color, NonlocalWeightedAverage, WarpNet, WarpNet_new -from src.models.vit.embed import EmbedModel -from src.models.vit.config import load_config -from src.data import transforms -from src.data.dataloader import VideosDataset, VideosDataset_ImageNet -from src.utils import CenterPad_threshold -from src.utils import ( - TimeHandler, - RGB2Lab, - ToTensor, - Normalize, - LossHandler, - WarpingLayer, - uncenter_l, - tensor_lab2rgb, - print_num_params, - SquaredPadding -) -from src.scheduler import PolynomialLR - -from torch.nn.parallel import DistributedDataParallel as DDP -import torch.distributed as dist -from torch.utils.data.distributed import DistributedSampler - - -parser = argparse.ArgumentParser() -parser.add_argument("--video_data_root_list", type=str, default="dataset") -parser.add_argument("--flow_data_root_list", type=str, default='flow') -parser.add_argument("--mask_data_root_list", type=str, default='mask') -parser.add_argument("--data_root_imagenet", 
default="imagenet", type=str) -parser.add_argument("--annotation_file_path", default="dataset/annotation.csv", type=str) -parser.add_argument("--imagenet_pairs_file", default="imagenet_pairs.txt", type=str) -parser.add_argument("--gpu_ids", type=str, default="0,1,2,3", help="separate by comma") -parser.add_argument("--workers", type=int, default=0) -parser.add_argument("--batch_size", type=int, default=2) -parser.add_argument("--image_size", type=int, default=[384, 384]) -parser.add_argument("--ic", type=int, default=7) -parser.add_argument("--epoch", type=int, default=40) -parser.add_argument("--resume_epoch", type=int, default=0) -parser.add_argument("--resume", action='store_true') -parser.add_argument("--load_pretrained_model", action='store_true') -parser.add_argument("--pretrained_model_dir", type=str, default='ckpt') -parser.add_argument("--lr", type=float, default=1e-4) -parser.add_argument("--beta1", type=float, default=0.5) -parser.add_argument("--lr_step", type=int, default=1) -parser.add_argument("--lr_gamma", type=float, default=0.9) -parser.add_argument("--checkpoint_dir", type=str, default="checkpoints") -parser.add_argument("--checkpoint_step", type=int, default=500) -parser.add_argument("--real_reference_probability", type=float, default=0.7) -parser.add_argument("--nonzero_placeholder_probability", type=float, default=0.0) -parser.add_argument("--domain_invariant", action='store_true') -parser.add_argument("--weigth_l1", type=float, default=2.0) -parser.add_argument("--weight_contextual", type=float, default="0.5") -parser.add_argument("--weight_perceptual", type=float, default="0.02") -parser.add_argument("--weight_smoothness", type=float, default="5.0") -parser.add_argument("--weight_gan", type=float, default="0.5") -parser.add_argument("--weight_nonlocal_smoothness", type=float, default="0.0") -parser.add_argument("--weight_nonlocal_consistent", type=float, default="0.0") -parser.add_argument("--weight_consistent", type=float, default="0.05") -parser.add_argument("--luminance_noise", type=float, default="2.0") -parser.add_argument("--permute_data", action='store_true') -parser.add_argument("--contextual_loss_direction", type=str, default="forward", help="forward or backward matching") -parser.add_argument("--batch_accum_size", type=int, default=10) -parser.add_argument("--epoch_train_discriminator", type=int, default=3) -parser.add_argument("--vit_version", type=str, default="vit_tiny_patch16_384") -parser.add_argument("--use_dummy", action='store_true') -parser.add_argument("--use_wandb", action='store_true') -parser.add_argument("--use_feature_transform", action='store_true') -parser.add_argument("--head_out_idx", type=str, default="8,9,10,11") -parser.add_argument("--wandb_token", type=str, default="") -parser.add_argument("--wandb_name", type=str, default="") - - -def ddp_setup(): - dist.init_process_group(backend="nccl") - local_rank = int(os.environ['LOCAL_RANK']) - return local_rank - -def ddp_cleanup(): - dist.destroy_process_group() - -def prepare_dataloader_ddp(dataset, batch_size=4, pin_memory=False, num_workers=0): - sampler = DistributedSampler(dataset, shuffle=True) - dataloader = DataLoader(dataset, - batch_size=batch_size, - pin_memory=pin_memory, - num_workers=num_workers, - sampler=sampler) - return dataloader - -def is_master_process(): - ddp_rank = int(os.environ['RANK']) - return ddp_rank == 0 - -def load_data(): - transforms_video = [ - SquaredPadding(target_size=opt.image_size[0]), - RGB2Lab(), - ToTensor(), - Normalize(), - ] - - 
train_dataset_videos = [ - VideosDataset( - video_data_root=video_data_root, - flow_data_root=flow_data_root, - mask_data_root=mask_data_root, - imagenet_folder=opt.data_root_imagenet, - annotation_file_path=opt.annotation_file_path, - image_size=opt.image_size, - image_transform=torch_transforms.Compose(transforms_video), - real_reference_probability=opt.real_reference_probability, - nonzero_placeholder_probability=opt.nonzero_placeholder_probability, - ) - for video_data_root, flow_data_root, mask_data_root in zip(opt.video_data_root_list, opt.flow_data_root_list, opt.mask_data_root_list) - ] - - transforms_imagenet = [SquaredPadding(target_size=opt.image_size[0]), RGB2Lab(), ToTensor(), Normalize()] - extra_reference_transform = [ - torch_transforms.RandomHorizontalFlip(0.5), - torch_transforms.RandomResizedCrop(480, (0.98, 1.0), ratio=(0.8, 1.2)), - ] - - train_dataset_imagenet = VideosDataset_ImageNet( - imagenet_data_root=opt.data_root_imagenet, - pairs_file=opt.imagenet_pairs_file, - image_size=opt.image_size, - transforms_imagenet=transforms_imagenet, - distortion_level=4, - brightnessjitter=5, - nonzero_placeholder_probability=opt.nonzero_placeholder_probability, - extra_reference_transform=extra_reference_transform, - real_reference_probability=opt.real_reference_probability, - ) - dataset_combined = ConcatDataset(train_dataset_videos + [train_dataset_imagenet]) - data_loader = prepare_dataloader_ddp(dataset_combined, - batch_size=opt.batch_size, - pin_memory=False, - num_workers=opt.workers) - return data_loader - -def save_checkpoints(saved_path): - # Make directory if the folder doesn't exists - os.makedirs(saved_path, exist_ok=True) - - # Save model - torch.save( - nonlocal_net.module.state_dict(), - os.path.join(saved_path, "nonlocal_net.pth"), - ) - torch.save( - colornet.module.state_dict(), - os.path.join(saved_path, "colornet.pth"), - ) - torch.save( - discriminator.module.state_dict(), - os.path.join(saved_path, "discriminator.pth"), - ) - torch.save( - embed_net.state_dict(), - os.path.join(saved_path, "embed_net.pth") - ) - - # Save learning state for restoring train - learning_state = { - "epoch": epoch_num, - "total_iter": total_iter, - "optimizer_g": optimizer_g.state_dict(), - "optimizer_d": optimizer_d.state_dict(), - "optimizer_schedule_g": step_optim_scheduler_g.state_dict(), - "optimizer_schedule_d": step_optim_scheduler_d.state_dict(), - } - - torch.save(learning_state, os.path.join(saved_path, "learning_state.pth")) - -def training_logger(): - if (total_iter % opt.checkpoint_step == 0) or (total_iter == len(data_loader)): - train_loss_dict = {"train/" + str(k): v / loss_handler.count_sample for k, v in loss_handler.loss_dict.items()} - train_loss_dict["train/opt_g_lr_1"] = step_optim_scheduler_g.get_last_lr()[0] - train_loss_dict["train/opt_g_lr_2"] = step_optim_scheduler_g.get_last_lr()[1] - train_loss_dict["train/opt_d_lr"] = step_optim_scheduler_d.get_last_lr()[0] - - alert_text = f"l1_loss: {l1_loss.item()}\npercep_loss: {perceptual_loss.item()}\nctx_loss: {contextual_loss_total.item()}\ncst_loss: {consistent_loss.item()}\nsm_loss: {smoothness_loss.item()}\ntotal: {total_loss.item()}" - - if opt.use_wandb: - wandb.log(train_loss_dict) - wandb.alert(title=f"Progress training #{total_iter}", text=alert_text) - - for idx in range(I_predict_rgb.shape[0]): - concated_I = make_grid( - [(I_predict_rgb[idx] * 255), (I_reference_rgb[idx] * 255), (I_current_rgb[idx] * 255)], nrow=3 - ) - wandb_concated_I = wandb.Image( - concated_I, - caption="[LEFT] Predict, 
[CENTER] Reference, [RIGHT] Ground truth\n[REF] {}, [FRAME] {}".format( - ref_path[idx], curr_frame_path[idx] - ), - ) - wandb.log({f"example_{idx}": wandb_concated_I}) - - # Save learning state checkpoint - # save_checkpoints(os.path.join(opt.checkpoint_dir, 'runs')) - loss_handler.reset() - - -def load_params(ckpt_file, local_rank, has_module=False): - params = torch.load(ckpt_file, map_location=f'cuda:{local_rank}') - new_params = [] - for key, value in params.items(): - new_params.append(("module."+key if has_module else key, value)) - return OrderedDict(new_params) - - -def parse(parser, save=True): - opt = parser.parse_args() - args = vars(opt) - - print("------------------------------ Options -------------------------------") - for k, v in sorted(args.items()): - print("%s: %s" % (str(k), str(v))) - print("-------------------------------- End ---------------------------------") - - if save: - file_name = os.path.join("opt.txt") - with open(file_name, "wt") as opt_file: - opt_file.write(os.path.basename(sys.argv[0]) + " " + strftime("%Y-%m-%d %H:%M:%S", gmtime()) + "\n") - opt_file.write("------------------------------ Options -------------------------------\n") - for k, v in sorted(args.items()): - opt_file.write("%s: %s\n" % (str(k), str(v))) - opt_file.write("-------------------------------- End ---------------------------------\n") - return opt - - -def gpu_setup(): - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - cudnn.benchmark = True - torch.cuda.set_device(opt.gpu_ids[0]) - device = torch.device("cuda") - print("running on GPU", opt.gpu_ids) - return device - - -if __name__ == "__main__": - ############################################## SETUP ############################################### - torch.multiprocessing.set_start_method("spawn", force=True) - # =============== GET PARSER OPTION ================ - opt = parse(parser) - opt.video_data_root_list = opt.video_data_root_list.split(",") - opt.flow_data_root_list = opt.flow_data_root_list.split(",") - opt.mask_data_root_list = opt.mask_data_root_list.split(",") - opt.gpu_ids = list(map(int, opt.gpu_ids.split(","))) - opt.head_out_idx = list(map(int, opt.head_out_idx.split(","))) - n_dim_output = 3 if opt.use_feature_transform else 4 - assert len(opt.head_out_idx) == 4, "Size of head_out_idx must be 4" - - # =================== INIT WANDB =================== -# if is_master_process(): - if opt.use_wandb: - print("Save images to Wandb") - if opt.wandb_token != "": - try: - wandb.login(key=opt.wandb_token) - except: - pass - if opt.use_wandb: - wandb.init( - project="video-colorization", - group=f"{opt.wandb_name} {datetime.now(tz=ZoneInfo('Asia/Ho_Chi_Minh')).strftime('%Y/%m/%d_%H-%M-%S')}", - #group="DDP" - ) - - # ================== SETUP DEVICE ================== - local_rank = ddp_setup() - # =================== VIT CONFIG =================== - cfg = load_config() - model_cfg = cfg["model"][opt.vit_version] - model_cfg["image_size"] = (384, 384) - model_cfg["backbone"] = opt.vit_version - model_cfg["dropout"] = 0.0 - model_cfg["drop_path_rate"] = 0.1 - model_cfg["n_cls"] = 10 - ############################################ LOAD DATA ############################################# - data_loader = load_data() - ########################################## DEFINE NETWORK ########################################## - - - colornet = DDP(ColorVidNet(opt.ic).to(local_rank), device_ids=[local_rank], output_device=local_rank) - if opt.use_feature_transform: - nonlocal_net = DDP(WarpNet().to(local_rank), device_ids=[local_rank], 
output_device=local_rank) - else: - nonlocal_net = DDP(WarpNet_new(model_cfg["d_model"]).to(local_rank), device_ids=[local_rank], output_device=local_rank) - discriminator = DDP(Discriminator_x64(ndf=64).to(local_rank), device_ids=[local_rank], output_device=local_rank) - weighted_layer_color = WeightedAverage_color().to(local_rank) - nonlocal_weighted_layer = NonlocalWeightedAverage().to(local_rank) - warping_layer = WarpingLayer(device=local_rank).to(local_rank) - embed_net = EmbedModel(model_cfg, head_out_idx=opt.head_out_idx, n_dim_output=n_dim_output, device=local_rank) - - if is_master_process(): - # Print number of parameters - print("-" * 59) - print("| TYPE | Model name | Num params |") - print("-" * 59) - - colornet_params = print_num_params(colornet) - nonlocal_net_params = print_num_params(nonlocal_net) - discriminator_params = print_num_params(discriminator) - weighted_layer_color_params = print_num_params(weighted_layer_color) - nonlocal_weighted_layer_params = print_num_params(nonlocal_weighted_layer) - warping_layer_params = print_num_params(warping_layer) - embed_net_params = print_num_params(embed_net) - print("-" * 59) - print( - f"| TOTAL | | {('{:,}'.format(colornet_params+nonlocal_net_params+discriminator_params+weighted_layer_color_params+nonlocal_weighted_layer_params+warping_layer_params+embed_net_params)).rjust(10)} |" - ) - print("-" * 59) - if opt.use_wandb: - wandb.watch(discriminator, log="all", log_freq=opt.checkpoint_step, idx=0) - wandb.watch(embed_net, log="all", log_freq=opt.checkpoint_step, idx=1) - wandb.watch(colornet, log="all", log_freq=opt.checkpoint_step, idx=2) - wandb.watch(nonlocal_net, log="all", log_freq=opt.checkpoint_step, idx=3) - - - - ###################################### DEFINE LOSS FUNCTIONS ####################################### - perceptual_loss_fn = Perceptual_loss(opt.domain_invariant, opt.weight_perceptual) - contextual_loss = ContextualLoss().to(local_rank) - contextual_forward_loss = ContextualLoss_forward().to(local_rank) - ######################################## DEFINE OPTIMIZERS ######################################### - optimizer_g = optim.AdamW( - [ - {"params": nonlocal_net.parameters(), "lr": opt.lr}, - {"params": colornet.parameters(), "lr": 2 * opt.lr}, - {"params": embed_net.parameters(), "lr": opt.lr}, - ], - betas=(0.5, 0.999), - eps=1e-5, - amsgrad=True, - ) - - optimizer_d = optim.AdamW( - filter(lambda p: p.requires_grad, discriminator.parameters()), - lr=opt.lr, - betas=(0.5, 0.999), - amsgrad=True, - ) - - step_optim_scheduler_g = PolynomialLR( - optimizer_g, - step_size=opt.lr_step, - iter_warmup=0, - iter_max=len(data_loader) * opt.epoch, - power=0.9, - min_lr=1e-8 - ) - step_optim_scheduler_d = PolynomialLR( - optimizer_d, - step_size=opt.lr_step, - iter_warmup=0, - iter_max=len(data_loader) * opt.epoch, - power=0.9, - min_lr=1e-8 - ) - ########################################## DEFINE OTHERS ########################################### - downsampling_by2 = nn.AvgPool2d(kernel_size=2).to(local_rank) - # timer_handler = TimeHandler() - loss_handler = LossHandler() - ############################################## TRAIN ############################################### - - # ============= USE PRETRAINED OR NOT ============== - if opt.load_pretrained_model: - nonlocal_net.load_state_dict(load_params(os.path.join(opt.pretrained_model_dir, "nonlocal_net.pth"), - local_rank, - has_module=True)) - colornet.load_state_dict(load_params(os.path.join(opt.pretrained_model_dir, "colornet.pth"), - local_rank, - 
has_module=True)) - discriminator.load_state_dict(load_params(os.path.join(opt.pretrained_model_dir, "discriminator.pth"), - local_rank, - has_module=True)) - embed_net_params = load_params(os.path.join(opt.pretrained_model_dir, "embed_net.pth"), - local_rank, - has_module=False) - if "module.vit.heads_out" in embed_net_params: - embed_net_params.pop("module.vit.heads_out") - elif "vit.heads_out" in embed_net_params: - embed_net_params.pop("vit.heads_out") - embed_net.load_state_dict(embed_net_params) - - learning_checkpoint = torch.load(os.path.join(opt.pretrained_model_dir, "learning_state.pth")) - optimizer_g.load_state_dict(learning_checkpoint["optimizer_g"]) - optimizer_d.load_state_dict(learning_checkpoint["optimizer_d"]) - step_optim_scheduler_g.load_state_dict(learning_checkpoint["optimizer_schedule_g"]) - step_optim_scheduler_d.load_state_dict(learning_checkpoint["optimizer_schedule_d"]) - total_iter = learning_checkpoint['total_iter'] - start_epoch = learning_checkpoint['epoch']+1 - else: - total_iter = 0 - start_epoch = 1 - - - - for epoch_num in range(start_epoch, opt.epoch+1): - data_loader.sampler.set_epoch(epoch_num-1) - - if is_master_process(): - train_progress_bar = tqdm( - data_loader, - desc =f'Epoch {epoch_num}[Training]', - position = 0, - leave = False - ) - else: - train_progress_bar = data_loader - for iter, sample in enumerate(train_progress_bar): - # timer_handler.compute_time("load_sample") - total_iter += 1 - # =============== LOAD DATA SAMPLE ================ - ( - I_last_lab, ######## (3, H, W) - I_current_lab, ##### (3, H, W) - I_reference_lab, ### (3, H, W) - flow_forward, ###### (2, H, W) - mask, ############## (1, H, W) - placeholder_lab, ### (3, H, W) - self_ref_flag, ##### (3, H, W) - prev_frame_path, - curr_frame_path, - ref_path, - ) = sample - - I_last_lab = I_last_lab.to(local_rank) - I_current_lab = I_current_lab.to(local_rank) - I_reference_lab = I_reference_lab.to(local_rank) - flow_forward = flow_forward.to(local_rank) - mask = mask.to(local_rank) - placeholder_lab = placeholder_lab.to(local_rank) - self_ref_flag = self_ref_flag.to(local_rank) - - I_last_l = I_last_lab[:, 0:1, :, :] - I_last_ab = I_last_lab[:, 1:3, :, :] - I_current_l = I_current_lab[:, 0:1, :, :] - I_current_ab = I_current_lab[:, 1:3, :, :] - I_reference_l = I_reference_lab[:, 0:1, :, :] - I_reference_ab = I_reference_lab[:, 1:3, :, :] - I_reference_rgb = tensor_lab2rgb(torch.cat((uncenter_l(I_reference_l), I_reference_ab), dim=1)) - - # _load_sample_time = timer_handler.compute_time("load_sample") - # timer_handler.compute_time("forward_model") - - features_B = embed_net(I_reference_rgb) - _, B_feat_1, B_feat_2, B_feat_3 = features_B - - # ================== COLORIZATION ================== - # The last frame - I_last_ab_predict, I_last_nonlocal_lab_predict = frame_colorization( - IA_l=I_last_l, - IB_lab=I_reference_lab, - IA_last_lab=placeholder_lab, - features_B=features_B, - embed_net=embed_net, - colornet=colornet, - nonlocal_net=nonlocal_net, - luminance_noise=opt.luminance_noise, - ) - I_last_lab_predict = torch.cat((I_last_l, I_last_ab_predict), dim=1) - - # The current frame - I_current_ab_predict, I_current_nonlocal_lab_predict = frame_colorization( - IA_l=I_current_l, - IB_lab=I_reference_lab, - IA_last_lab=I_last_lab_predict, - features_B=features_B, - embed_net=embed_net, - colornet=colornet, - nonlocal_net=nonlocal_net, - luminance_noise=opt.luminance_noise, - ) - I_current_lab_predict = torch.cat((I_last_l, I_current_ab_predict), dim=1) - - # ================ 
UPDATE GENERATOR ================ - if opt.weight_gan > 0: - optimizer_g.zero_grad() - optimizer_d.zero_grad() - fake_data_lab = torch.cat( - ( - uncenter_l(I_current_l), - I_current_ab_predict, - uncenter_l(I_last_l), - I_last_ab_predict, - ), - dim=1, - ) - real_data_lab = torch.cat( - ( - uncenter_l(I_current_l), - I_current_ab, - uncenter_l(I_last_l), - I_last_ab, - ), - dim=1, - ) - - if opt.permute_data: - batch_index = torch.arange(-1, opt.batch_size - 1, dtype=torch.long) - real_data_lab = real_data_lab[batch_index, ...] - - discriminator_loss = discriminator_loss_fn(real_data_lab, fake_data_lab, discriminator) - discriminator_loss.backward() - optimizer_d.step() - - optimizer_g.zero_grad() - optimizer_d.zero_grad() - - # ================== COMPUTE LOSS ================== - # L1 loss - l1_loss = l1_loss_fn(I_current_ab, I_current_ab_predict) * opt.weigth_l1 - - # Generator_loss. TODO: freeze this to train some first epoch - if epoch_num > opt.epoch_train_discriminator: - generator_loss = generator_loss_fn(real_data_lab, fake_data_lab, discriminator, opt.weight_gan, local_rank) - - # Perceptual Loss - I_predict_rgb = tensor_lab2rgb(torch.cat((uncenter_l(I_current_l), I_current_ab_predict), dim=1)) - _, pred_feat_1, pred_feat_2, pred_feat_3 = embed_net(I_predict_rgb) - - I_current_rgb = tensor_lab2rgb(torch.cat((uncenter_l(I_current_l), I_current_ab), dim=1)) - A_feat_0, _, _, A_feat_3 = embed_net(I_current_rgb) - - perceptual_loss = perceptual_loss_fn(A_feat_3, pred_feat_3) - - # Contextual Loss - contextual_style5_1 = torch.mean(contextual_forward_loss(pred_feat_3, B_feat_3.detach())) * 8 - contextual_style4_1 = torch.mean(contextual_forward_loss(pred_feat_2, B_feat_2.detach())) * 4 - contextual_style3_1 = torch.mean(contextual_forward_loss(pred_feat_1, B_feat_1.detach())) * 2 - - contextual_loss_total = ( - contextual_style5_1 + contextual_style4_1 + contextual_style3_1 - ) * opt.weight_contextual - - # Consistent Loss - consistent_loss = consistent_loss_fn( - I_current_lab_predict, - I_last_ab_predict, - I_current_nonlocal_lab_predict, - I_last_nonlocal_lab_predict, - flow_forward, - mask, - warping_layer, - weight_consistent=opt.weight_consistent, - weight_nonlocal_consistent=opt.weight_nonlocal_consistent, - device=local_rank, - ) - - # Smoothness loss - smoothness_loss = smoothness_loss_fn( - I_current_l, - I_current_lab, - I_current_ab_predict, - A_feat_0, - weighted_layer_color, - nonlocal_weighted_layer, - weight_smoothness=opt.weight_smoothness, - weight_nonlocal_smoothness=opt.weight_nonlocal_smoothness, - device=local_rank - ) - - # Total loss - total_loss = l1_loss + perceptual_loss + contextual_loss_total + consistent_loss + smoothness_loss - if epoch_num > opt.epoch_train_discriminator: - total_loss += generator_loss - - # Add loss to loss handler - loss_handler.add_loss(key="total_loss", loss=total_loss.item()) - loss_handler.add_loss(key="l1_loss", loss=l1_loss.item()) - loss_handler.add_loss(key="perceptual_loss", loss=perceptual_loss.item()) - loss_handler.add_loss(key="contextual_loss", loss=contextual_loss_total.item()) - loss_handler.add_loss(key="consistent_loss", loss=consistent_loss.item()) - loss_handler.add_loss(key="smoothness_loss", loss=smoothness_loss.item()) - loss_handler.add_loss(key="discriminator_loss", loss=discriminator_loss.item()) - if epoch_num > opt.epoch_train_discriminator: - loss_handler.add_loss(key="generator_loss", loss=generator_loss.item()) - loss_handler.count_one_sample() - - total_loss.backward() - - optimizer_g.step() - 
step_optim_scheduler_g.step() - step_optim_scheduler_d.step() - - # _forward_model_time = timer_handler.compute_time("forward_model") - - # timer_handler.compute_time("training_logger") - training_logger() - # _training_logger_time = timer_handler.compute_time("training_logger") - - #### - if is_master_process(): - save_checkpoints(os.path.join(opt.checkpoint_dir, f"epoch_{epoch_num}")) - #### - if opt.use_wandb: - wandb.finish() - ddp_cleanup() \ No newline at end of file diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/JpegPresets.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/JpegPresets.py deleted file mode 100644 index a678e248e9ab2465738ea79f7f5c4bbc260c1919..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/JpegPresets.py +++ /dev/null @@ -1,240 +0,0 @@ -""" -JPEG quality settings equivalent to the Photoshop settings. -Can be used when saving JPEG files. - -The following presets are available by default: -``web_low``, ``web_medium``, ``web_high``, ``web_very_high``, ``web_maximum``, -``low``, ``medium``, ``high``, ``maximum``. -More presets can be added to the :py:data:`presets` dict if needed. - -To apply the preset, specify:: - - quality="preset_name" - -To apply only the quantization table:: - - qtables="preset_name" - -To apply only the subsampling setting:: - - subsampling="preset_name" - -Example:: - - im.save("image_name.jpg", quality="web_high") - -Subsampling ------------ - -Subsampling is the practice of encoding images by implementing less resolution -for chroma information than for luma information. -(ref.: https://en.wikipedia.org/wiki/Chroma_subsampling) - -Possible subsampling values are 0, 1 and 2 that correspond to 4:4:4, 4:2:2 and -4:2:0. - -You can get the subsampling of a JPEG with the -:func:`.JpegImagePlugin.get_sampling` function. - -In JPEG compressed data a JPEG marker is used instead of an EXIF tag. -(ref.: https://exiv2.org/tags.html) - - -Quantization tables -------------------- - -They are values use by the DCT (Discrete cosine transform) to remove -*unnecessary* information from the image (the lossy part of the compression). -(ref.: https://en.wikipedia.org/wiki/Quantization_matrix#Quantization_matrices, -https://en.wikipedia.org/wiki/JPEG#Quantization) - -You can get the quantization tables of a JPEG with:: - - im.quantization - -This will return a dict with a number of lists. You can pass this dict -directly as the qtables argument when saving a JPEG. - -The quantization table format in presets is a list with sublists. These formats -are interchangeable. 
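A fuller sketch tying these options together (a minimal example only, assuming
Pillow is installed; ``input.jpg`` and the output names are placeholders)::

    from PIL import Image

    im = Image.open("input.jpg")

    # Write with the preset's quantization tables and subsampling.
    im.save("preset.jpg", quality="web_high")

    # Read back the tables that were actually written and reuse them
    # directly, together with an explicit 4:4:4 subsampling setting.
    tables = Image.open("preset.jpg").quantization
    im.save("reused_tables.jpg", qtables=tables, subsampling=0)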
- -Libjpeg ref.: -https://web.archive.org/web/20120328125543/http://www.jpegcameras.com/libjpeg/libjpeg-3.html - -""" - -# fmt: off -presets = { - 'web_low': {'subsampling': 2, # "4:2:0" - 'quantization': [ - [20, 16, 25, 39, 50, 46, 62, 68, - 16, 18, 23, 38, 38, 53, 65, 68, - 25, 23, 31, 38, 53, 65, 68, 68, - 39, 38, 38, 53, 65, 68, 68, 68, - 50, 38, 53, 65, 68, 68, 68, 68, - 46, 53, 65, 68, 68, 68, 68, 68, - 62, 65, 68, 68, 68, 68, 68, 68, - 68, 68, 68, 68, 68, 68, 68, 68], - [21, 25, 32, 38, 54, 68, 68, 68, - 25, 28, 24, 38, 54, 68, 68, 68, - 32, 24, 32, 43, 66, 68, 68, 68, - 38, 38, 43, 53, 68, 68, 68, 68, - 54, 54, 66, 68, 68, 68, 68, 68, - 68, 68, 68, 68, 68, 68, 68, 68, - 68, 68, 68, 68, 68, 68, 68, 68, - 68, 68, 68, 68, 68, 68, 68, 68] - ]}, - 'web_medium': {'subsampling': 2, # "4:2:0" - 'quantization': [ - [16, 11, 11, 16, 23, 27, 31, 30, - 11, 12, 12, 15, 20, 23, 23, 30, - 11, 12, 13, 16, 23, 26, 35, 47, - 16, 15, 16, 23, 26, 37, 47, 64, - 23, 20, 23, 26, 39, 51, 64, 64, - 27, 23, 26, 37, 51, 64, 64, 64, - 31, 23, 35, 47, 64, 64, 64, 64, - 30, 30, 47, 64, 64, 64, 64, 64], - [17, 15, 17, 21, 20, 26, 38, 48, - 15, 19, 18, 17, 20, 26, 35, 43, - 17, 18, 20, 22, 26, 30, 46, 53, - 21, 17, 22, 28, 30, 39, 53, 64, - 20, 20, 26, 30, 39, 48, 64, 64, - 26, 26, 30, 39, 48, 63, 64, 64, - 38, 35, 46, 53, 64, 64, 64, 64, - 48, 43, 53, 64, 64, 64, 64, 64] - ]}, - 'web_high': {'subsampling': 0, # "4:4:4" - 'quantization': [ - [6, 4, 4, 6, 9, 11, 12, 16, - 4, 5, 5, 6, 8, 10, 12, 12, - 4, 5, 5, 6, 10, 12, 14, 19, - 6, 6, 6, 11, 12, 15, 19, 28, - 9, 8, 10, 12, 16, 20, 27, 31, - 11, 10, 12, 15, 20, 27, 31, 31, - 12, 12, 14, 19, 27, 31, 31, 31, - 16, 12, 19, 28, 31, 31, 31, 31], - [7, 7, 13, 24, 26, 31, 31, 31, - 7, 12, 16, 21, 31, 31, 31, 31, - 13, 16, 17, 31, 31, 31, 31, 31, - 24, 21, 31, 31, 31, 31, 31, 31, - 26, 31, 31, 31, 31, 31, 31, 31, - 31, 31, 31, 31, 31, 31, 31, 31, - 31, 31, 31, 31, 31, 31, 31, 31, - 31, 31, 31, 31, 31, 31, 31, 31] - ]}, - 'web_very_high': {'subsampling': 0, # "4:4:4" - 'quantization': [ - [2, 2, 2, 2, 3, 4, 5, 6, - 2, 2, 2, 2, 3, 4, 5, 6, - 2, 2, 2, 2, 4, 5, 7, 9, - 2, 2, 2, 4, 5, 7, 9, 12, - 3, 3, 4, 5, 8, 10, 12, 12, - 4, 4, 5, 7, 10, 12, 12, 12, - 5, 5, 7, 9, 12, 12, 12, 12, - 6, 6, 9, 12, 12, 12, 12, 12], - [3, 3, 5, 9, 13, 15, 15, 15, - 3, 4, 6, 11, 14, 12, 12, 12, - 5, 6, 9, 14, 12, 12, 12, 12, - 9, 11, 14, 12, 12, 12, 12, 12, - 13, 14, 12, 12, 12, 12, 12, 12, - 15, 12, 12, 12, 12, 12, 12, 12, - 15, 12, 12, 12, 12, 12, 12, 12, - 15, 12, 12, 12, 12, 12, 12, 12] - ]}, - 'web_maximum': {'subsampling': 0, # "4:4:4" - 'quantization': [ - [1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 2, - 1, 1, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 1, 1, 2, 2, 3, - 1, 1, 1, 1, 2, 2, 3, 3, - 1, 1, 1, 2, 2, 3, 3, 3, - 1, 1, 2, 2, 3, 3, 3, 3], - [1, 1, 1, 2, 2, 3, 3, 3, - 1, 1, 1, 2, 3, 3, 3, 3, - 1, 1, 1, 3, 3, 3, 3, 3, - 2, 2, 3, 3, 3, 3, 3, 3, - 2, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3] - ]}, - 'low': {'subsampling': 2, # "4:2:0" - 'quantization': [ - [18, 14, 14, 21, 30, 35, 34, 17, - 14, 16, 16, 19, 26, 23, 12, 12, - 14, 16, 17, 21, 23, 12, 12, 12, - 21, 19, 21, 23, 12, 12, 12, 12, - 30, 26, 23, 12, 12, 12, 12, 12, - 35, 23, 12, 12, 12, 12, 12, 12, - 34, 12, 12, 12, 12, 12, 12, 12, - 17, 12, 12, 12, 12, 12, 12, 12], - [20, 19, 22, 27, 20, 20, 17, 17, - 19, 25, 23, 14, 14, 12, 12, 12, - 22, 23, 14, 14, 12, 12, 12, 12, - 27, 14, 14, 12, 12, 12, 12, 12, - 20, 14, 12, 12, 12, 12, 12, 12, - 20, 12, 12, 12, 12, 12, 
12, 12, - 17, 12, 12, 12, 12, 12, 12, 12, - 17, 12, 12, 12, 12, 12, 12, 12] - ]}, - 'medium': {'subsampling': 2, # "4:2:0" - 'quantization': [ - [12, 8, 8, 12, 17, 21, 24, 17, - 8, 9, 9, 11, 15, 19, 12, 12, - 8, 9, 10, 12, 19, 12, 12, 12, - 12, 11, 12, 21, 12, 12, 12, 12, - 17, 15, 19, 12, 12, 12, 12, 12, - 21, 19, 12, 12, 12, 12, 12, 12, - 24, 12, 12, 12, 12, 12, 12, 12, - 17, 12, 12, 12, 12, 12, 12, 12], - [13, 11, 13, 16, 20, 20, 17, 17, - 11, 14, 14, 14, 14, 12, 12, 12, - 13, 14, 14, 14, 12, 12, 12, 12, - 16, 14, 14, 12, 12, 12, 12, 12, - 20, 14, 12, 12, 12, 12, 12, 12, - 20, 12, 12, 12, 12, 12, 12, 12, - 17, 12, 12, 12, 12, 12, 12, 12, - 17, 12, 12, 12, 12, 12, 12, 12] - ]}, - 'high': {'subsampling': 0, # "4:4:4" - 'quantization': [ - [6, 4, 4, 6, 9, 11, 12, 16, - 4, 5, 5, 6, 8, 10, 12, 12, - 4, 5, 5, 6, 10, 12, 12, 12, - 6, 6, 6, 11, 12, 12, 12, 12, - 9, 8, 10, 12, 12, 12, 12, 12, - 11, 10, 12, 12, 12, 12, 12, 12, - 12, 12, 12, 12, 12, 12, 12, 12, - 16, 12, 12, 12, 12, 12, 12, 12], - [7, 7, 13, 24, 20, 20, 17, 17, - 7, 12, 16, 14, 14, 12, 12, 12, - 13, 16, 14, 14, 12, 12, 12, 12, - 24, 14, 14, 12, 12, 12, 12, 12, - 20, 14, 12, 12, 12, 12, 12, 12, - 20, 12, 12, 12, 12, 12, 12, 12, - 17, 12, 12, 12, 12, 12, 12, 12, - 17, 12, 12, 12, 12, 12, 12, 12] - ]}, - 'maximum': {'subsampling': 0, # "4:4:4" - 'quantization': [ - [2, 2, 2, 2, 3, 4, 5, 6, - 2, 2, 2, 2, 3, 4, 5, 6, - 2, 2, 2, 2, 4, 5, 7, 9, - 2, 2, 2, 4, 5, 7, 9, 12, - 3, 3, 4, 5, 8, 10, 12, 12, - 4, 4, 5, 7, 10, 12, 12, 12, - 5, 5, 7, 9, 12, 12, 12, 12, - 6, 6, 9, 12, 12, 12, 12, 12], - [3, 3, 5, 9, 13, 15, 15, 15, - 3, 4, 6, 10, 14, 12, 12, 12, - 5, 6, 9, 14, 12, 12, 12, 12, - 9, 10, 14, 12, 12, 12, 12, 12, - 13, 14, 12, 12, 12, 12, 12, 12, - 15, 12, 12, 12, 12, 12, 12, 12, - 15, 12, 12, 12, 12, 12, 12, 12, - 15, 12, 12, 12, 12, 12, 12, 12] - ]}, -} -# fmt: on diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/telemetry/events.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/telemetry/events.py deleted file mode 100644 index 64c77574f9fb9d8ea578ddb017eb12328bfbca2a..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/telemetry/events.py +++ /dev/null @@ -1,27 +0,0 @@ -from dataclasses import dataclass -from typing import ClassVar -from chromadb.telemetry import TelemetryEvent - - -@dataclass -class ClientStartEvent(TelemetryEvent): - name: ClassVar[str] = "client_start" - - -@dataclass -class ServerStartEvent(TelemetryEvent): - name: ClassVar[str] = "server_start" - - -@dataclass -class CollectionAddEvent(TelemetryEvent): - name: ClassVar[str] = "collection_add" - collection_uuid: str - add_amount: int - - -@dataclass -class CollectionDeleteEvent(TelemetryEvent): - name: ClassVar[str] = "collection_delete" - collection_uuid: str - delete_amount: int diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/datatypes/network.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/datatypes/network.py deleted file mode 100644 index 14b7bc3b9a793b24dbffb3d07bfb84834b84daae..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/datatypes/network.py +++ /dev/null @@ -1,129 +0,0 @@ -import socket -from ipaddress import IPv4Address, IPv6Address -from typing import Union, MutableSequence, Sequence - -from 
clickhouse_connect.datatypes.base import ClickHouseType -from clickhouse_connect.driver.common import write_array, int_size -from clickhouse_connect.driver.insert import InsertContext -from clickhouse_connect.driver.query import QueryContext -from clickhouse_connect.driver.types import ByteSource -from clickhouse_connect.driver.ctypes import data_conv - -IPV4_V6_MASK = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff' -V6_NULL = bytes(b'\x00' * 16) - - -# pylint: disable=protected-access -class IPv4(ClickHouseType): - _array_type = 'L' if int_size == 2 else 'I' - valid_formats = 'string', 'native', 'int' - python_type = IPv4Address - byte_size = 4 - - def _read_column_binary(self, source: ByteSource, num_rows: int, ctx: QueryContext): - if self.read_format(ctx) == 'int': - return source.read_array(self._array_type, num_rows) - if self.read_format(ctx) == 'string': - column = source.read_array(self._array_type, num_rows) - return [socket.inet_ntoa(x.to_bytes(4, 'big')) for x in column] - return data_conv.read_ipv4_col(source, num_rows) - - def _write_column_binary(self, column: Union[Sequence, MutableSequence], dest: bytearray, ctx: InsertContext): - first = self._first_value(column) - if isinstance(first, str): - fixed = 24, 16, 8, 0 - # pylint: disable=consider-using-generator - column = [(sum([int(b) << fixed[ix] for ix, b in enumerate(x.split('.'))])) if x else 0 for x in column] - else: - if self.nullable: - column = [x._ip if x else 0 for x in column] - else: - column = [x._ip for x in column] - write_array(self._array_type, column, dest) - - def _active_null(self, ctx: QueryContext): - fmt = self.read_format(ctx) - if ctx.use_none: - return None - if fmt == 'string': - return '0.0.0.0' - if fmt == 'int': - return 0 - return None - - -# pylint: disable=protected-access -class IPv6(ClickHouseType): - valid_formats = 'string', 'native' - python_type = IPv6Address - byte_size = 16 - - def _read_column_binary(self, source: ByteSource, num_rows: int, ctx: QueryContext): - if self.read_format(ctx) == 'string': - return self._read_binary_str(source, num_rows) - return self._read_binary_ip(source, num_rows) - - @staticmethod - def _read_binary_ip(source: ByteSource, num_rows: int): - fast_ip_v6 = IPv6Address.__new__ - fast_ip_v4 = IPv4Address.__new__ - with_scope_id = '_scope_id' in IPv6Address.__slots__ - new_col = [] - app = new_col.append - ifb = int.from_bytes - for _ in range(num_rows): - int_value = ifb(source.read_bytes(16), 'big') - if int_value >> 32 == 0xFFFF: - ipv4 = fast_ip_v4(IPv4Address) - ipv4._ip = int_value & 0xFFFFFFFF - app(ipv4) - else: - ipv6 = fast_ip_v6(IPv6Address) - ipv6._ip = int_value - if with_scope_id: - ipv6._scope_id = None - app(ipv6) - return new_col - - @staticmethod - def _read_binary_str(source: ByteSource, num_rows: int): - new_col = [] - app = new_col.append - v4mask = IPV4_V6_MASK - tov4 = socket.inet_ntoa - tov6 = socket.inet_ntop - af6 = socket.AF_INET6 - for _ in range(num_rows): - x = source.read_bytes(16) - if x[:12] == v4mask: - app(tov4(x[12:])) - else: - app(tov6(af6, x)) - return new_col - - def _write_column_binary(self, column: Union[Sequence, MutableSequence], dest: bytearray, ctx: InsertContext): - v = V6_NULL - first = self._first_value(column) - v4mask = IPV4_V6_MASK - af6 = socket.AF_INET6 - tov6 = socket.inet_pton - if isinstance(first, str): - for x in column: - if x is None: - dest += v - elif '.' 
in x: - dest += v4mask + bytes(int(b) for b in x.split('.')) - else: - dest += tov6(af6, x) - else: - for x in column: - if x is None: - dest += v - else: - b = x.packed - dest += b if len(b) == 16 else (v4mask + b) - - def _active_null(self, ctx): - if ctx.use_none: - return None - return '::' if self.read_format(ctx) == 'string' else V6_NULL diff --git a/spaces/cihyFjudo/fairness-paper-search/The Dirty Picture 3 Tamil Dubbed Movie Free Download Tips and Tricks to Avoid Malware and Viruses When Downloading.md b/spaces/cihyFjudo/fairness-paper-search/The Dirty Picture 3 Tamil Dubbed Movie Free Download Tips and Tricks to Avoid Malware and Viruses When Downloading.md deleted file mode 100644 index 1b28e49ff5fcca6676e87d9bef508c53bce4c495..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/The Dirty Picture 3 Tamil Dubbed Movie Free Download Tips and Tricks to Avoid Malware and Viruses When Downloading.md +++ /dev/null @@ -1,6 +0,0 @@ -

      diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/_tkinter_finder.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/_tkinter_finder.py deleted file mode 100644 index 597c21b5e385b7fe09191c9f5dd89b6600c22967..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/_tkinter_finder.py +++ /dev/null @@ -1,17 +0,0 @@ -""" Find compiled module linking to Tcl / Tk libraries -""" -import sys -import tkinter -from tkinter import _tkinter as tk - -try: - if hasattr(sys, "pypy_find_executable"): - TKINTER_LIB = tk.tklib_cffi.__file__ - else: - TKINTER_LIB = tk.__file__ -except AttributeError: - # _tkinter may be compiled directly into Python, in which case __file__ is - # not available. load_tkinter_funcs will check the binary first in any case. - TKINTER_LIB = None - -tk_version = str(tkinter.TkVersion) diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/_l_t_a_g.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/_l_t_a_g.py deleted file mode 100644 index 24f5e131f0c615dcf86b0494854d9a3a5a1284f2..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/_l_t_a_g.py +++ /dev/null @@ -1,64 +0,0 @@ -from fontTools.misc.textTools import bytesjoin, tobytes, safeEval -from . import DefaultTable -import struct - -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html - - -class table__l_t_a_g(DefaultTable.DefaultTable): - def __init__(self, tag=None): - DefaultTable.DefaultTable.__init__(self, tag) - self.version, self.flags = 1, 0 - self.tags = [] - - def addTag(self, tag): - """Add 'tag' to the list of langauge tags if not already there. - - Returns the integer index of 'tag' in the list of all tags. 
- """ - try: - return self.tags.index(tag) - except ValueError: - self.tags.append(tag) - return len(self.tags) - 1 - - def decompile(self, data, ttFont): - self.version, self.flags, numTags = struct.unpack(">LLL", data[:12]) - assert self.version == 1 - self.tags = [] - for i in range(numTags): - pos = 12 + i * 4 - offset, length = struct.unpack(">HH", data[pos : pos + 4]) - tag = data[offset : offset + length].decode("ascii") - self.tags.append(tag) - - def compile(self, ttFont): - dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))] - stringPool = "" - for tag in self.tags: - offset = stringPool.find(tag) - if offset < 0: - offset = len(stringPool) - stringPool = stringPool + tag - offset = offset + 12 + len(self.tags) * 4 - dataList.append(struct.pack(">HH", offset, len(tag))) - dataList.append(tobytes(stringPool)) - return bytesjoin(dataList) - - def toXML(self, writer, ttFont): - writer.simpletag("version", value=self.version) - writer.newline() - writer.simpletag("flags", value=self.flags) - writer.newline() - for tag in self.tags: - writer.simpletag("LanguageTag", tag=tag) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "tags"): - self.tags = [] - if name == "LanguageTag": - self.tags.append(attrs["tag"]) - elif "value" in attrs: - value = safeEval(attrs["value"]) - setattr(self, name, value) diff --git a/spaces/codejin/diffsingerkr/meldataset.py b/spaces/codejin/diffsingerkr/meldataset.py deleted file mode 100644 index 5990e753b459acfa1e2b951116c113c371f6eea2..0000000000000000000000000000000000000000 --- a/spaces/codejin/diffsingerkr/meldataset.py +++ /dev/null @@ -1,230 +0,0 @@ -############################################################################### -# MIT License -# -# Copyright (c) 2020 Jungil Kong -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-############################################################################### - -import math -import os -import random -import torch -import torch.utils.data -import numpy as np -from librosa.util import normalize -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def load_wav(full_path): - sampling_rate, data = read(full_path) - return data, sampling_rate - - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - if fmax not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - - spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec) - spec = spectral_normalize_torch(spec) - - return spec - -def spectrogram(y, n_fft, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - spec = spectral_normalize_torch(spec) - - return spec - -def spec_energy(y, n_fft, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - energy = torch.norm(spec, dim= 1) - - return energy - -def get_dataset_filelist(a): - with 
open(a.input_training_file, 'r', encoding='utf-8') as fi: - training_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav') - for x in fi.read().split('\n') if len(x) > 0] - - with open(a.input_validation_file, 'r', encoding='utf-8') as fi: - validation_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav') - for x in fi.read().split('\n') if len(x) > 0] - return training_files, validation_files - - -class MelDataset(torch.utils.data.Dataset): - def __init__(self, training_files, segment_size, n_fft, num_mels, - hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1, - device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None): - self.audio_files = training_files - random.seed(1234) - if shuffle: - random.shuffle(self.audio_files) - self.segment_size = segment_size - self.sampling_rate = sampling_rate - self.split = split - self.n_fft = n_fft - self.num_mels = num_mels - self.hop_size = hop_size - self.win_size = win_size - self.fmin = fmin - self.fmax = fmax - self.fmax_loss = fmax_loss - self.cached_wav = None - self.n_cache_reuse = n_cache_reuse - self._cache_ref_count = 0 - self.device = device - self.fine_tuning = fine_tuning - self.base_mels_path = base_mels_path - - def __getitem__(self, index): - filename = self.audio_files[index] - if self._cache_ref_count == 0: - audio, sampling_rate = load_wav(filename) - audio = audio / MAX_WAV_VALUE - if not self.fine_tuning: - audio = normalize(audio) * 0.95 - self.cached_wav = audio - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - self._cache_ref_count = self.n_cache_reuse - else: - audio = self.cached_wav - self._cache_ref_count -= 1 - - audio = torch.FloatTensor(audio) - audio = audio.unsqueeze(0) - - if not self.fine_tuning: - if self.split: - if audio.size(1) >= self.segment_size: - max_audio_start = audio.size(1) - self.segment_size - audio_start = random.randint(0, max_audio_start) - audio = audio[:, audio_start:audio_start+self.segment_size] - else: - audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant') - - mel = mel_spectrogram(audio, self.n_fft, self.num_mels, - self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax, - center=False) - else: - mel = np.load( - os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy')) - mel = torch.from_numpy(mel) - - if len(mel.shape) < 3: - mel = mel.unsqueeze(0) - - if self.split: - frames_per_seg = math.ceil(self.segment_size / self.hop_size) - - if audio.size(1) >= self.segment_size: - mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1) - mel = mel[:, :, mel_start:mel_start + frames_per_seg] - audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size] - else: - mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant') - audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant') - - mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels, - self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss, - center=False) - - return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) - - def __len__(self): - return len(self.audio_files) diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/brenderpix.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/brenderpix.c deleted file mode 100644 index 
e95ab3d4afdf241ad013f597571f5323cf144718..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/brenderpix.c +++ /dev/null @@ -1,294 +0,0 @@ -/* - * BRender PIX (.pix) image decoder - * Copyright (c) 2012 Aleksi Nurmi - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/* Tested against samples from I-War / Independence War and Defiance. */ - -#include "libavutil/imgutils.h" - -#include "avcodec.h" -#include "bytestream.h" -#include "codec_internal.h" -#include "decode.h" - -#define HEADER1_CHUNK 0x03 -#define HEADER2_CHUNK 0x3D -#define IMAGE_DATA_CHUNK 0x21 - -/* In 8-bit colour mode, 256 colours are available at any time. Which 256 - * colours are available is determined by the contents of the hardware palette - * (or CLUT). In this case, the palette supplied with BRender (std.pal) has - * been loaded into the CLUT. - * - * The 256 colours in std.pal are divided into seven ranges, or `colour ramps'. - * The first 64 colours represent shades of grey ranging from very dark grey - * (black) to very light grey (white). The following colours are 32-element - * ramps for six colours as shown below. 
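 * (Concretely: indices 0-63 hold the grey ramp, and each 32-entry colour
 * ramp r (blue, green, cyan, red, magenta, yellow, in that order) occupies
 * indices 64 + 32*r through 95 + 32*r of std_pal_table below.)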
- */ -static const uint32_t std_pal_table[256] = { - // gray - 0xFF000000, 0xFF030303, 0xFF060606, 0xFF090909, 0xFF0C0C0C, 0xFF0F0F0F, - 0xFF121212, 0xFF151515, 0xFF181818, 0xFF1B1B1B, 0xFF1E1E1E, 0xFF212121, - 0xFF242424, 0xFF272727, 0xFF2A2A2A, 0xFF2D2D2D, 0xFF313131, 0xFF343434, - 0xFF373737, 0xFF3A3A3A, 0xFF3D3D3D, 0xFF404040, 0xFF434343, 0xFF464646, - 0xFF494949, 0xFF4C4C4C, 0xFF4F4F4F, 0xFF525252, 0xFF555555, 0xFF585858, - 0xFF5B5B5B, 0xFF5E5E5E, 0xFF626262, 0xFF656565, 0xFF686868, 0xFF6B6B6B, - 0xFF6E6E6E, 0xFF717171, 0xFF747474, 0xFF777777, 0xFF7A7A7A, 0xFF7D7D7D, - 0xFF808080, 0xFF838383, 0xFF868686, 0xFF898989, 0xFF8C8C8C, 0xFF8F8F8F, - 0xFF939393, 0xFF999999, 0xFFA0A0A0, 0xFFA7A7A7, 0xFFAEAEAE, 0xFFB4B4B4, - 0xFFBBBBBB, 0xFFC2C2C2, 0xFFC9C9C9, 0xFFCFCFCF, 0xFFD6D6D6, 0xFFDDDDDD, - 0xFFE4E4E4, 0xFFEAEAEA, 0xFFF1F1F1, 0xFFF8F8F8, - - // blue - 0xFF000000, 0xFF020209, 0xFF050513, 0xFF07071D, 0xFF0A0A27, 0xFF0C0C31, - 0xFF0F0F3B, 0xFF111145, 0xFF14144F, 0xFF161659, 0xFF181863, 0xFF1B1B6D, - 0xFF1E1E77, 0xFF202080, 0xFF22228A, 0xFF252594, 0xFF28289E, 0xFF2A2AA8, - 0xFF2D2DB2, 0xFF2F2FBC, 0xFF3131C6, 0xFF3434D0, 0xFF3737DA, 0xFF3939E4, - 0xFF3C3CEE, 0xFF5454F0, 0xFF6C6CF2, 0xFF8585F4, 0xFF9D9DF6, 0xFFB5B5F8, - 0xFFCECEFA, 0xFFE6E6FC, - - // green - 0xFF000000, 0xFF020902, 0xFF051305, 0xFF071D07, 0xFF0A270A, 0xFF0C310C, - 0xFF0F3B0F, 0xFF114511, 0xFF144F14, 0xFF165916, 0xFF186318, 0xFF1B6D1B, - 0xFF1E771E, 0xFF208020, 0xFF228A22, 0xFF259425, 0xFF289E28, 0xFF2AA82A, - 0xFF2DB22D, 0xFF2FBC2F, 0xFF31C631, 0xFF34D034, 0xFF37DA37, 0xFF39E439, - 0xFF3CEE3C, 0xFF54F054, 0xFF6CF26C, 0xFF85F485, 0xFF9DF69D, 0xFFB5F8B5, - 0xFFCEFACE, 0xFFE6FCE6, - - // cyan - 0xFF000000, 0xFF020909, 0xFF051313, 0xFF071D1D, 0xFF0A2727, 0xFF0C3131, - 0xFF0F3B3B, 0xFF114545, 0xFF144F4F, 0xFF165959, 0xFF186363, 0xFF1B6D6D, - 0xFF1E7777, 0xFF208080, 0xFF228A8A, 0xFF259494, 0xFF289E9E, 0xFF2AA8A8, - 0xFF2DB2B2, 0xFF2FBCBC, 0xFF31C6C6, 0xFF34D0D0, 0xFF37DADA, 0xFF39E4E4, - 0xFF3CEEEE, 0xFF54F0F0, 0xFF6CF2F2, 0xFF85F4F4, 0xFF9DF6F6, 0xFFB5F8F8, - 0xFFCEFAFA, 0xFFE6FCFC, - - // red - 0xFF000000, 0xFF090202, 0xFF130505, 0xFF1D0707, 0xFF270A0A, 0xFF310C0C, - 0xFF3B0F0F, 0xFF451111, 0xFF4F1414, 0xFF591616, 0xFF631818, 0xFF6D1B1B, - 0xFF771E1E, 0xFF802020, 0xFF8A2222, 0xFF942525, 0xFF9E2828, 0xFFA82A2A, - 0xFFB22D2D, 0xFFBC2F2F, 0xFFC63131, 0xFFD03434, 0xFFDA3737, 0xFFE43939, - 0xFFEE3C3C, 0xFFF05454, 0xFFF26C6C, 0xFFF48585, 0xFFF69D9D, 0xFFF8B5B5, - 0xFFFACECE, 0xFFFCE6E6, - - // magenta - 0xFF000000, 0xFF090209, 0xFF130513, 0xFF1D071D, 0xFF270A27, 0xFF310C31, - 0xFF3B0F3B, 0xFF451145, 0xFF4F144F, 0xFF591659, 0xFF631863, 0xFF6D1B6D, - 0xFF771E77, 0xFF802080, 0xFF8A228A, 0xFF942594, 0xFF9E289E, 0xFFA82AA8, - 0xFFB22DB2, 0xFFBC2FBC, 0xFFC631C6, 0xFFD034D0, 0xFFDA37DA, 0xFFE439E4, - 0xFFEE3CEE, 0xFFF054F0, 0xFFF26CF2, 0xFFF485F4, 0xFFF69DF6, 0xFFF8B5F8, - 0xFFFACEFA, 0xFFFCE6FC, - - // yellow - 0xFF000000, 0xFF090902, 0xFF131305, 0xFF1D1D07, 0xFF27270A, 0xFF31310C, - 0xFF3B3B0F, 0xFF454511, 0xFF4F4F14, 0xFF595916, 0xFF636318, 0xFF6D6D1B, - 0xFF77771E, 0xFF808020, 0xFF8A8A22, 0xFF949425, 0xFF9E9E28, 0xFFA8A82A, - 0xFFB2B22D, 0xFFBCBC2F, 0xFFC6C631, 0xFFD0D034, 0xFFDADA37, 0xFFE4E439, - 0xFFEEEE3C, 0xFFF0F054, 0xFFF2F26C, 0xFFF4F485, 0xFFF6F69D, 0xFFF8F8B5, - 0xFFFAFACE, 0xFFFCFCE6, -}; - -typedef struct PixHeader { - int width; - int height; - int format; -} PixHeader; - -static int pix_decode_header(PixHeader *out, GetByteContext *pgb) -{ - unsigned int header_len = bytestream2_get_be32(pgb); - - out->format = 
bytestream2_get_byte(pgb); - bytestream2_skip(pgb, 2); - out->width = bytestream2_get_be16(pgb); - out->height = bytestream2_get_be16(pgb); - - // the header is at least 11 bytes long; we read the first 7 - if (header_len < 11) - return AVERROR_INVALIDDATA; - - // skip the rest of the header - bytestream2_skip(pgb, header_len - 7); - - return 0; -} - -static int pix_decode_frame(AVCodecContext *avctx, AVFrame *frame, - int *got_frame, AVPacket *avpkt) -{ - int ret, i; - GetByteContext gb; - - unsigned int bytes_pp; - unsigned int magic[4]; - unsigned int chunk_type; - unsigned int data_len; - unsigned int bytes_per_scanline; - unsigned int bytes_left; - PixHeader hdr; - - bytestream2_init(&gb, avpkt->data, avpkt->size); - - magic[0] = bytestream2_get_be32(&gb); - magic[1] = bytestream2_get_be32(&gb); - magic[2] = bytestream2_get_be32(&gb); - magic[3] = bytestream2_get_be32(&gb); - - if (magic[0] != 0x12 || - magic[1] != 0x08 || - magic[2] != 0x02 || - magic[3] != 0x02) { - av_log(avctx, AV_LOG_ERROR, "Not a BRender PIX file.\n"); - return AVERROR_INVALIDDATA; - } - - chunk_type = bytestream2_get_be32(&gb); - if (chunk_type != HEADER1_CHUNK && chunk_type != HEADER2_CHUNK) { - av_log(avctx, AV_LOG_ERROR, "Invalid chunk type %d.\n", chunk_type); - return AVERROR_INVALIDDATA; - } - - ret = pix_decode_header(&hdr, &gb); - if (ret < 0) { - av_log(avctx, AV_LOG_ERROR, "Invalid header length.\n"); - return ret; - } - switch (hdr.format) { - case 3: - avctx->pix_fmt = AV_PIX_FMT_PAL8; - bytes_pp = 1; - break; - case 4: - avctx->pix_fmt = AV_PIX_FMT_RGB555BE; - bytes_pp = 2; - break; - case 5: - avctx->pix_fmt = AV_PIX_FMT_RGB565BE; - bytes_pp = 2; - break; - case 6: - avctx->pix_fmt = AV_PIX_FMT_RGB24; - bytes_pp = 3; - break; - case 7: - avctx->pix_fmt = AV_PIX_FMT_0RGB; - bytes_pp = 4; - break; - case 8: // ARGB - avctx->pix_fmt = AV_PIX_FMT_ARGB; - bytes_pp = 4; - break; - case 18: - avctx->pix_fmt = AV_PIX_FMT_YA8; - bytes_pp = 2; - break; - default: - avpriv_request_sample(avctx, "Format %d", hdr.format); - return AVERROR_PATCHWELCOME; - } - bytes_per_scanline = bytes_pp * hdr.width; - - if (bytestream2_get_bytes_left(&gb) < hdr.height * bytes_per_scanline) - return AVERROR_INVALIDDATA; - - if ((ret = ff_set_dimensions(avctx, hdr.width, hdr.height)) < 0) - return ret; - - if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) - return ret; - - chunk_type = bytestream2_get_be32(&gb); - - if (avctx->pix_fmt == AV_PIX_FMT_PAL8 && - (chunk_type == HEADER1_CHUNK || - chunk_type == HEADER2_CHUNK)) { - /* read palette data from data[1] */ - PixHeader palhdr; - uint32_t *pal_out = (uint32_t *)frame->data[1]; - - ret = pix_decode_header(&palhdr, &gb); - if (ret < 0) { - av_log(avctx, AV_LOG_ERROR, "Invalid palette header length.\n"); - return ret; - } - if (palhdr.format != 7) - avpriv_request_sample(avctx, "Palette not in RGB format"); - - chunk_type = bytestream2_get_be32(&gb); - data_len = bytestream2_get_be32(&gb); - bytestream2_skip(&gb, 8); - if (chunk_type != IMAGE_DATA_CHUNK || data_len != 1032 || - bytestream2_get_bytes_left(&gb) < 1032) { - av_log(avctx, AV_LOG_ERROR, "Invalid palette data.\n"); - return AVERROR_INVALIDDATA; - } - // palette data is surrounded by 8 null bytes (both top and bottom) - // convert 0RGB to machine endian format (ARGB32) - for (i = 0; i < 256; ++i) - *pal_out++ = (0xFFU << 24) | bytestream2_get_be32u(&gb); - bytestream2_skip(&gb, 8); - - frame->palette_has_changed = 1; - - chunk_type = bytestream2_get_be32(&gb); - } else if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { - /* no 
palette supplied, use the default one */ - uint32_t *pal_out = (uint32_t *)frame->data[1]; - - // TODO: add an AVOption to load custom palette files - av_log(avctx, AV_LOG_WARNING, - "Using default palette, colors might be off.\n"); - memcpy(pal_out, std_pal_table, sizeof(uint32_t) * 256); - - frame->palette_has_changed = 1; - } - - data_len = bytestream2_get_be32(&gb); - bytestream2_skip(&gb, 8); - - // read the image data to the buffer - bytes_left = bytestream2_get_bytes_left(&gb); - - if (chunk_type != IMAGE_DATA_CHUNK || data_len != bytes_left || - bytes_left / bytes_per_scanline < hdr.height) { - av_log(avctx, AV_LOG_ERROR, "Invalid image data.\n"); - return AVERROR_INVALIDDATA; - } - - av_image_copy_plane(frame->data[0], frame->linesize[0], - avpkt->data + bytestream2_tell(&gb), - bytes_per_scanline, - bytes_per_scanline, hdr.height); - - frame->pict_type = AV_PICTURE_TYPE_I; - frame->key_frame = 1; - *got_frame = 1; - - return avpkt->size; -} - -const FFCodec ff_brender_pix_decoder = { - .p.name = "brender_pix", - CODEC_LONG_NAME("BRender PIX image"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_BRENDER_PIX, - .p.capabilities = AV_CODEC_CAP_DR1, - FF_CODEC_DECODE_CB(pix_decode_frame), -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/hevc_lpf_sao_msa.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/hevc_lpf_sao_msa.c deleted file mode 100644 index cd94460f97bd21674ce74059777bb0d1fe64d131..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/hevc_lpf_sao_msa.c +++ /dev/null @@ -1,2746 +0,0 @@ -/* - * Copyright (c) 2015 -2017 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com) - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/mips/generic_macros_msa.h" -#include "libavcodec/mips/hevcdsp_mips.h" - -static void hevc_loopfilter_luma_hor_msa(uint8_t *src, int32_t stride, - int32_t beta, const int32_t *tc, - const uint8_t *p_is_pcm, const uint8_t *q_is_pcm) -{ - uint8_t *p3 = src - (stride << 2); - uint8_t *p2 = src - ((stride << 1) + stride); - uint8_t *p1 = src - (stride << 1); - uint8_t *p0 = src - stride; - uint8_t *q0 = src; - uint8_t *q1 = src + stride; - uint8_t *q2 = src + (stride << 1); - uint8_t *q3 = src + (stride << 1) + stride; - uint8_t flag0, flag1; - int32_t dp00, dq00, dp30, dq30, d00, d30; - int32_t d0030, d0434; - int32_t dp04, dq04, dp34, dq34, d04, d34; - int32_t tc0, p_is_pcm0, q_is_pcm0, beta30, beta20, tc250; - int32_t tc4, p_is_pcm4, q_is_pcm4, tc254, tmp; - uint64_t dst_val0, dst_val1; - v16u8 dst0, dst1, dst2, dst3, dst4, dst5; - v2i64 cmp0, cmp1, cmp2, p_is_pcm_vec, q_is_pcm_vec; - v2i64 cmp3; - v8u16 temp0, temp1; - v8i16 temp2; - v8i16 tc_pos, tc_neg; - v8i16 diff0, diff1, delta0, delta1, delta2, abs_delta0; - v16i8 zero = { 0 }; - v8u16 p3_src, p2_src, p1_src, p0_src, q0_src, q1_src, q2_src, q3_src; - - dp00 = abs(p2[0] - (p1[0] << 1) + p0[0]); - dq00 = abs(q2[0] - (q1[0] << 1) + q0[0]); - dp30 = abs(p2[3] - (p1[3] << 1) + p0[3]); - dq30 = abs(q2[3] - (q1[3] << 1) + q0[3]); - d00 = dp00 + dq00; - d30 = dp30 + dq30; - dp04 = abs(p2[4] - (p1[4] << 1) + p0[4]); - dq04 = abs(q2[4] - (q1[4] << 1) + q0[4]); - dp34 = abs(p2[7] - (p1[7] << 1) + p0[7]); - dq34 = abs(q2[7] - (q1[7] << 1) + q0[7]); - d04 = dp04 + dq04; - d34 = dp34 + dq34; - - p_is_pcm0 = p_is_pcm[0]; - p_is_pcm4 = p_is_pcm[1]; - q_is_pcm0 = q_is_pcm[0]; - q_is_pcm4 = q_is_pcm[1]; - - cmp0 = __msa_fill_d(p_is_pcm0); - cmp1 = __msa_fill_d(p_is_pcm4); - p_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - p_is_pcm_vec = __msa_ceqi_d(p_is_pcm_vec, 0); - - d0030 = (d00 + d30) >= beta; - d0434 = (d04 + d34) >= beta; - - cmp0 = (v2i64) __msa_fill_w(d0030); - cmp1 = (v2i64) __msa_fill_w(d0434); - cmp3 = (v2i64) __msa_ilvev_w((v4i32) cmp1, (v4i32) cmp0); - cmp3 = (v2i64) __msa_ceqi_w((v4i32) cmp3, 0); - - if ((!p_is_pcm0 || !p_is_pcm4 || !q_is_pcm0 || !q_is_pcm4) && - (!d0030 || !d0434)) { - p3_src = LD_UH(p3); - p2_src = LD_UH(p2); - p1_src = LD_UH(p1); - p0_src = LD_UH(p0); - - cmp0 = __msa_fill_d(q_is_pcm0); - cmp1 = __msa_fill_d(q_is_pcm4); - q_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - q_is_pcm_vec = __msa_ceqi_d(q_is_pcm_vec, 0); - - tc0 = tc[0]; - beta30 = beta >> 3; - beta20 = beta >> 2; - tc250 = ((tc0 * 5 + 1) >> 1); - tc4 = tc[1]; - tc254 = ((tc4 * 5 + 1) >> 1); - - cmp0 = (v2i64) __msa_fill_h(tc0); - cmp1 = (v2i64) __msa_fill_h(tc4); - - ILVR_B4_UH(zero, p3_src, zero, p2_src, zero, p1_src, zero, p0_src, - p3_src, p2_src, p1_src, p0_src); - q0_src = LD_UH(q0); - q1_src = LD_UH(q1); - q2_src = LD_UH(q2); - q3_src = LD_UH(q3); - - flag0 = abs(p3[0] - p0[0]) + abs(q3[0] - q0[0]) < beta30 && - abs(p0[0] - q0[0]) < tc250; - flag0 = flag0 && (abs(p3[3] - p0[3]) + abs(q3[3] - q0[3]) < beta30 && - abs(p0[3] - q0[3]) < tc250 && (d00 << 1) < beta20 && - (d30 << 1) < beta20); - - tc_pos = (v8i16) __msa_ilvev_d(cmp1, cmp0); - ILVR_B4_UH(zero, q0_src, zero, q1_src, zero, q2_src, zero, q3_src, - q0_src, q1_src, q2_src, q3_src); - flag1 = abs(p3[4] - p0[4]) + abs(q3[4] - q0[4]) < beta30 && - abs(p0[4] - 
q0[4]) < tc254; - flag1 = flag1 && (abs(p3[7] - p0[7]) + abs(q3[7] - q0[7]) < beta30 && - abs(p0[7] - q0[7]) < tc254 && (d04 << 1) < beta20 && - (d34 << 1) < beta20); - - cmp0 = (v2i64) __msa_fill_w(flag0); - cmp1 = (v2i64) __msa_fill_w(flag1); - cmp2 = (v2i64) __msa_ilvev_w((v4i32) cmp1, (v4i32) cmp0); - cmp2 = (v2i64) __msa_ceqi_w((v4i32) cmp2, 0); - - if (flag0 && flag1) { /* strong only */ - /* strong filter */ - tc_pos <<= 1; - tc_neg = -tc_pos; - - /* p part */ - temp0 = (p1_src + p0_src + q0_src); - temp1 = ((p3_src + p2_src) << 1) + p2_src + temp0; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - p2_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst0 = (v16u8) (temp2 + (v8i16) p2_src); - - temp1 = temp0 + p2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 2); - temp2 = (v8i16) (temp1 - p1_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst1 = (v16u8) (temp2 + (v8i16) p1_src); - - temp1 = (temp0 << 1) + p2_src + q1_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - p0_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst2 = (v16u8) (temp2 + (v8i16) p0_src); - - dst0 = __msa_bmz_v(dst0, (v16u8) p2_src, (v16u8) p_is_pcm_vec); - dst1 = __msa_bmz_v(dst1, (v16u8) p1_src, (v16u8) p_is_pcm_vec); - dst2 = __msa_bmz_v(dst2, (v16u8) p0_src, (v16u8) p_is_pcm_vec); - - /* q part */ - temp0 = (q1_src + p0_src + q0_src); - - temp1 = ((q3_src + q2_src) << 1) + q2_src + temp0; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - q2_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst5 = (v16u8) (temp2 + (v8i16) q2_src); - - temp1 = temp0 + q2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 2); - temp2 = (v8i16) (temp1 - q1_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst4 = (v16u8) (temp2 + (v8i16) q1_src); - - temp1 = (temp0 << 1) + p1_src + q2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - q0_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst3 = (v16u8) (temp2 + (v8i16) q0_src); - - dst3 = __msa_bmz_v(dst3, (v16u8) q0_src, (v16u8) q_is_pcm_vec); - dst4 = __msa_bmz_v(dst4, (v16u8) q1_src, (v16u8) q_is_pcm_vec); - dst5 = __msa_bmz_v(dst5, (v16u8) q2_src, (v16u8) q_is_pcm_vec); - - /* pack results to 8 bit */ - PCKEV_B2_UB(dst1, dst0, dst3, dst2, dst0, dst1); - dst2 = (v16u8) __msa_pckev_b((v16i8) dst5, (v16i8) dst4); - - /* pack src to 8 bit */ - PCKEV_B2_UB(p1_src, p2_src, q0_src, p0_src, dst3, dst4); - dst5 = (v16u8) __msa_pckev_b((v16i8) q2_src, (v16i8) q1_src); - - dst0 = __msa_bmz_v(dst0, dst3, (v16u8) cmp3); - dst1 = __msa_bmz_v(dst1, dst4, (v16u8) cmp3); - dst2 = __msa_bmz_v(dst2, dst5, (v16u8) cmp3); - - dst_val0 = __msa_copy_u_d((v2i64) dst2, 0); - dst_val1 = __msa_copy_u_d((v2i64) dst2, 1); - - ST_D4(dst0, dst1, 0, 1, 0, 1, p2, stride); - SD(dst_val0, p2 + 4 * stride); - SD(dst_val1, p2 + 5 * stride); - /* strong filter ends */ - } else if (flag0 == flag1) { /* weak only */ - /* weak filter */ - tc_neg = -tc_pos; - - diff0 = (v8i16) (q0_src - p0_src); - diff1 = (v8i16) (q1_src - p1_src); - diff0 = (diff0 << 3) + diff0; - diff1 = (diff1 << 1) + diff1; - delta0 = diff0 - diff1; - delta0 = __msa_srari_h(delta0, 4); - - temp1 = (v8u16) ((tc_pos << 3) + (tc_pos << 1)); - abs_delta0 = __msa_add_a_h(delta0, (v8i16) zero); - abs_delta0 = (v8u16) abs_delta0 < temp1; - - CLIP_SH(delta0, tc_neg, tc_pos); - - temp2 = (v8i16) (delta0 + p0_src); - CLIP_SH_0_255(temp2); - temp0 = (v8u16) __msa_bmz_v((v16u8) temp2, (v16u8) p0_src, - (v16u8) p_is_pcm_vec); - - temp2 = (v8i16) (q0_src - delta0); - 
CLIP_SH_0_255(temp2); - temp2 = (v8i16) __msa_bmz_v((v16u8) temp2, (v16u8) q0_src, - (v16u8) q_is_pcm_vec); - - p_is_pcm_vec = ~p_is_pcm_vec; - q_is_pcm_vec = ~q_is_pcm_vec; - tmp = (beta + (beta >> 1)) >> 3; - cmp0 = __msa_fill_d(dp00 + dp30 < tmp); - cmp1 = __msa_fill_d(dp04 + dp34 < tmp); - cmp0 = __msa_ilvev_d(cmp1, cmp0); - cmp0 = __msa_ceqi_d(cmp0, 0); - p_is_pcm_vec = p_is_pcm_vec | cmp0; - - cmp0 = __msa_fill_d(dq00 + dq30 < tmp); - cmp1 = __msa_fill_d(dq04 + dq34 < tmp); - cmp0 = __msa_ilvev_d(cmp1, cmp0); - cmp0 = __msa_ceqi_d(cmp0, 0); - q_is_pcm_vec = q_is_pcm_vec | cmp0; - - tc_pos >>= 1; - tc_neg = -tc_pos; - - delta1 = (v8i16) __msa_aver_u_h(p2_src, p0_src); - delta1 -= (v8i16) p1_src; - delta1 += delta0; - delta1 >>= 1; - CLIP_SH(delta1, tc_neg, tc_pos); - delta1 = (v8i16) p1_src + (v8i16) delta1; - CLIP_SH_0_255(delta1); - delta1 = (v8i16) __msa_bmnz_v((v16u8) delta1, (v16u8) p1_src, - (v16u8) p_is_pcm_vec); - - delta2 = (v8i16) __msa_aver_u_h(q0_src, q2_src); - delta2 = delta2 - (v8i16) q1_src; - delta2 = delta2 - delta0; - delta2 = delta2 >> 1; - CLIP_SH(delta2, tc_neg, tc_pos); - delta2 = (v8i16) q1_src + (v8i16) delta2; - CLIP_SH_0_255(delta2); - delta2 = (v8i16) __msa_bmnz_v((v16u8) delta2, (v16u8) q1_src, - (v16u8) q_is_pcm_vec); - - dst1 = (v16u8) __msa_bmz_v((v16u8) delta1, (v16u8) p1_src, - (v16u8) abs_delta0); - dst2 = (v16u8) __msa_bmz_v((v16u8) temp0, (v16u8) p0_src, - (v16u8) abs_delta0); - dst3 = (v16u8) __msa_bmz_v((v16u8) temp2, (v16u8) q0_src, - (v16u8) abs_delta0); - dst4 = (v16u8) __msa_bmz_v((v16u8) delta2, (v16u8) q1_src, - (v16u8) abs_delta0); - /* pack results to 8 bit */ - PCKEV_B2_UB(dst2, dst1, dst4, dst3, dst0, dst1); - - /* pack src to 8 bit */ - PCKEV_B2_UB(p0_src, p1_src, q1_src, q0_src, dst2, dst3); - - dst0 = __msa_bmz_v(dst0, dst2, (v16u8) cmp3); - dst1 = __msa_bmz_v(dst1, dst3, (v16u8) cmp3); - - p2 += stride; - ST_D4(dst0, dst1, 0, 1, 0, 1, p2, stride); - /* weak filter ends */ - } else { /* strong + weak */ - /* strong filter */ - tc_pos <<= 1; - tc_neg = -tc_pos; - - /* p part */ - temp0 = (p1_src + p0_src + q0_src); - temp1 = ((p3_src + p2_src) << 1) + p2_src + temp0; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - p2_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst0 = (v16u8) (temp2 + (v8i16) p2_src); - - temp1 = temp0 + p2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 2); - temp2 = (v8i16) (temp1 - p1_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst1 = (v16u8) (temp2 + (v8i16) p1_src); - - temp1 = (temp0 << 1) + p2_src + q1_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - p0_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst2 = (v16u8) (temp2 + (v8i16) p0_src); - - dst0 = __msa_bmz_v(dst0, (v16u8) p2_src, (v16u8) p_is_pcm_vec); - dst1 = __msa_bmz_v(dst1, (v16u8) p1_src, (v16u8) p_is_pcm_vec); - dst2 = __msa_bmz_v(dst2, (v16u8) p0_src, (v16u8) p_is_pcm_vec); - - /* q part */ - temp0 = (q1_src + p0_src + q0_src); - - temp1 = ((q3_src + q2_src) << 1) + q2_src + temp0; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - q2_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst5 = (v16u8) (temp2 + (v8i16) q2_src); - - temp1 = temp0 + q2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 2); - temp2 = (v8i16) (temp1 - q1_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst4 = (v16u8) (temp2 + (v8i16) q1_src); - - temp1 = (temp0 << 1) + p1_src + q2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - q0_src); - CLIP_SH(temp2, tc_neg, tc_pos); 
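/* strong-filter output for q0: add the clipped delta back to the original q0 samples */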
- dst3 = (v16u8) (temp2 + (v8i16) q0_src); - - dst3 = __msa_bmz_v(dst3, (v16u8) q0_src, (v16u8) q_is_pcm_vec); - dst4 = __msa_bmz_v(dst4, (v16u8) q1_src, (v16u8) q_is_pcm_vec); - dst5 = __msa_bmz_v(dst5, (v16u8) q2_src, (v16u8) q_is_pcm_vec); - - /* pack strong results to 8 bit */ - PCKEV_B2_UB(dst1, dst0, dst3, dst2, dst0, dst1); - dst2 = (v16u8) __msa_pckev_b((v16i8) dst5, (v16i8) dst4); - /* strong filter ends */ - - /* weak filter */ - tc_pos >>= 1; - tc_neg = -tc_pos; - - diff0 = (v8i16) (q0_src - p0_src); - diff1 = (v8i16) (q1_src - p1_src); - diff0 = (diff0 << 3) + diff0; - diff1 = (diff1 << 1) + diff1; - delta0 = diff0 - diff1; - delta0 = __msa_srari_h(delta0, 4); - - temp1 = (v8u16) ((tc_pos << 3) + (tc_pos << 1)); - abs_delta0 = __msa_add_a_h(delta0, (v8i16) zero); - abs_delta0 = (v8u16) abs_delta0 < temp1; - - CLIP_SH(delta0, tc_neg, tc_pos); - - temp2 = (v8i16) (delta0 + p0_src); - CLIP_SH_0_255(temp2); - temp0 = (v8u16) __msa_bmz_v((v16u8) temp2, (v16u8) p0_src, - (v16u8) p_is_pcm_vec); - - temp2 = (v8i16) (q0_src - delta0); - CLIP_SH_0_255(temp2); - temp2 = (v8i16) __msa_bmz_v((v16u8) temp2, (v16u8) q0_src, - (v16u8) q_is_pcm_vec); - - p_is_pcm_vec = ~p_is_pcm_vec; - q_is_pcm_vec = ~q_is_pcm_vec; - tmp = (beta + (beta >> 1)) >> 3; - cmp0 = __msa_fill_d(dp00 + dp30 < tmp); - cmp1 = __msa_fill_d(dp04 + dp34 < tmp); - cmp0 = __msa_ilvev_d(cmp1, cmp0); - p_is_pcm_vec = p_is_pcm_vec | __msa_ceqi_d(cmp0, 0); - - cmp0 = __msa_fill_d(dq00 + dq30 < tmp); - cmp1 = __msa_fill_d(dq04 + dq34 < tmp); - cmp0 = __msa_ilvev_d(cmp1, cmp0); - q_is_pcm_vec = q_is_pcm_vec | __msa_ceqi_d(cmp0, 0); - - tc_pos >>= 1; - tc_neg = -tc_pos; - - delta1 = (v8i16) __msa_aver_u_h(p2_src, p0_src); - delta1 -= (v8i16) p1_src; - delta1 += delta0; - delta1 >>= 1; - CLIP_SH(delta1, tc_neg, tc_pos); - delta1 = (v8i16) p1_src + (v8i16) delta1; - CLIP_SH_0_255(delta1); - delta1 = (v8i16) __msa_bmnz_v((v16u8) delta1, (v16u8) p1_src, - (v16u8) p_is_pcm_vec); - - delta2 = (v8i16) __msa_aver_u_h(q0_src, q2_src); - delta2 = delta2 - (v8i16) q1_src; - delta2 = delta2 - delta0; - delta2 = delta2 >> 1; - CLIP_SH(delta2, tc_neg, tc_pos); - delta2 = (v8i16) q1_src + (v8i16) delta2; - CLIP_SH_0_255(delta2); - delta2 = (v8i16) __msa_bmnz_v((v16u8) delta2, (v16u8) q1_src, - (v16u8) q_is_pcm_vec); - - delta1 = (v8i16) __msa_bmz_v((v16u8) delta1, (v16u8) p1_src, - (v16u8) abs_delta0); - temp0 = (v8u16) __msa_bmz_v((v16u8) temp0, (v16u8) p0_src, - (v16u8) abs_delta0); - temp2 = (v8i16) __msa_bmz_v((v16u8) temp2, (v16u8) q0_src, - (v16u8) abs_delta0); - delta2 = (v8i16) __msa_bmz_v((v16u8) delta2, (v16u8) q1_src, - (v16u8) abs_delta0); - /* weak filter ends */ - - /* pack weak results to 8 bit */ - PCKEV_B2_UB(delta1, p2_src, temp2, temp0, dst3, dst4); - dst5 = (v16u8) __msa_pckev_b((v16i8) q2_src, (v16i8) delta2); - - /* select between weak or strong */ - dst0 = __msa_bmnz_v(dst0, dst3, (v16u8) cmp2); - dst1 = __msa_bmnz_v(dst1, dst4, (v16u8) cmp2); - dst2 = __msa_bmnz_v(dst2, dst5, (v16u8) cmp2); - - /* pack src to 8 bit */ - PCKEV_B2_UB(p1_src, p2_src, q0_src, p0_src, dst3, dst4); - dst5 = (v16u8) __msa_pckev_b((v16i8) q2_src, (v16i8) q1_src); - - dst0 = __msa_bmz_v(dst0, dst3, (v16u8) cmp3); - dst1 = __msa_bmz_v(dst1, dst4, (v16u8) cmp3); - dst2 = __msa_bmz_v(dst2, dst5, (v16u8) cmp3); - - dst_val0 = __msa_copy_u_d((v2i64) dst2, 0); - dst_val1 = __msa_copy_u_d((v2i64) dst2, 1); - - ST_D4(dst0, dst1, 0, 1, 0, 1, p2, stride); - SD(dst_val0, p2 + 4 * stride); - SD(dst_val1, p2 + 5 * stride); - } - } -} - -static void 
hevc_loopfilter_luma_ver_msa(uint8_t *src, int32_t stride, - int32_t beta, const int32_t *tc, - const uint8_t *p_is_pcm, const uint8_t *q_is_pcm) -{ - uint8_t *p3 = src; - uint8_t *p2 = src + 3 * stride; - uint8_t *p1 = src + (stride << 2); - uint8_t *p0 = src + 7 * stride; - uint8_t flag0, flag1; - uint16_t tmp0, tmp1; - uint32_t tmp2, tmp3; - int32_t dp00, dq00, dp30, dq30, d00, d30; - int32_t d0030, d0434; - int32_t dp04, dq04, dp34, dq34, d04, d34; - int32_t tc0, p_is_pcm0, q_is_pcm0, beta30, beta20, tc250; - int32_t tc4, p_is_pcm4, q_is_pcm4, tc254, tmp; - v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - v2i64 cmp0, cmp1, cmp2, p_is_pcm_vec, q_is_pcm_vec; - v2i64 cmp3; - v8u16 temp0, temp1; - v8i16 temp2; - v8i16 tc_pos, tc_neg; - v8i16 diff0, diff1, delta0, delta1, delta2, abs_delta0; - v16i8 zero = { 0 }; - v8u16 p3_src, p2_src, p1_src, p0_src, q0_src, q1_src, q2_src, q3_src; - - dp00 = abs(p3[-3] - (p3[-2] << 1) + p3[-1]); - dq00 = abs(p3[2] - (p3[1] << 1) + p3[0]); - dp30 = abs(p2[-3] - (p2[-2] << 1) + p2[-1]); - dq30 = abs(p2[2] - (p2[1] << 1) + p2[0]); - d00 = dp00 + dq00; - d30 = dp30 + dq30; - p_is_pcm0 = p_is_pcm[0]; - q_is_pcm0 = q_is_pcm[0]; - - dp04 = abs(p1[-3] - (p1[-2] << 1) + p1[-1]); - dq04 = abs(p1[2] - (p1[1] << 1) + p1[0]); - dp34 = abs(p0[-3] - (p0[-2] << 1) + p0[-1]); - dq34 = abs(p0[2] - (p0[1] << 1) + p0[0]); - d04 = dp04 + dq04; - d34 = dp34 + dq34; - p_is_pcm4 = p_is_pcm[1]; - q_is_pcm4 = q_is_pcm[1]; - - cmp0 = __msa_fill_d(p_is_pcm0); - cmp1 = __msa_fill_d(p_is_pcm4); - p_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - p_is_pcm_vec = __msa_ceqi_d(p_is_pcm_vec, 0); - - d0030 = (d00 + d30) >= beta; - d0434 = (d04 + d34) >= beta; - - cmp0 = __msa_fill_d(d0030); - cmp1 = __msa_fill_d(d0434); - cmp3 = __msa_ilvev_d(cmp1, cmp0); - cmp3 = (v2i64) __msa_ceqi_d(cmp3, 0); - - if ((!p_is_pcm0 || !p_is_pcm4 || !q_is_pcm0 || !q_is_pcm4) && - (!d0030 || !d0434)) { - src -= 4; - LD_UH8(src, stride, p3_src, p2_src, p1_src, p0_src, q0_src, q1_src, - q2_src, q3_src); - - cmp0 = __msa_fill_d(q_is_pcm0); - cmp1 = __msa_fill_d(q_is_pcm4); - q_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - q_is_pcm_vec = __msa_ceqi_d(q_is_pcm_vec, 0); - - tc0 = tc[0]; - beta30 = beta >> 3; - beta20 = beta >> 2; - tc250 = ((tc0 * 5 + 1) >> 1); - - tc4 = tc[1]; - tc254 = ((tc4 * 5 + 1) >> 1); - cmp0 = (v2i64) __msa_fill_h(tc0 << 1); - cmp1 = (v2i64) __msa_fill_h(tc4 << 1); - tc_pos = (v8i16) __msa_ilvev_d(cmp1, cmp0); - - TRANSPOSE8x8_UB_UH(p3_src, p2_src, p1_src, p0_src, q0_src, q1_src, - q2_src, q3_src, p3_src, p2_src, p1_src, p0_src, - q0_src, q1_src, q2_src, q3_src); - - flag0 = abs(p3[-4] - p3[-1]) + abs(p3[3] - p3[0]) < beta30 && - abs(p3[-1] - p3[0]) < tc250; - flag0 = flag0 && (abs(p2[-4] - p2[-1]) + abs(p2[3] - p2[0]) < beta30 && - abs(p2[-1] - p2[0]) < tc250 && (d00 << 1) < beta20 && - (d30 << 1) < beta20); - cmp0 = __msa_fill_d(flag0); - ILVR_B4_UH(zero, p3_src, zero, p2_src, zero, p1_src, zero, p0_src, - p3_src, p2_src, p1_src, p0_src); - - flag1 = abs(p1[-4] - p1[-1]) + abs(p1[3] - p1[0]) < beta30 && - abs(p1[-1] - p1[0]) < tc254; - flag1 = flag1 && (abs(p0[-4] - p0[-1]) + abs(p0[3] - p0[0]) < beta30 && - abs(p0[-1] - p0[0]) < tc254 && (d04 << 1) < beta20 && - (d34 << 1) < beta20); - ILVR_B4_UH(zero, q0_src, zero, q1_src, zero, q2_src, zero, q3_src, - q0_src, q1_src, q2_src, q3_src); - - cmp1 = __msa_fill_d(flag1); - cmp2 = __msa_ilvev_d(cmp1, cmp0); - cmp2 = __msa_ceqi_d(cmp2, 0); - - if (flag0 && flag1) { /* strong only */ - /* strong filter */ - tc_neg = -tc_pos; - - /* p part */ - 
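/* Scalar reference for the strong-filter p side built below (a sketch of the
 * standard HEVC formulas, not text from this file; tc_pos was loaded as 2*tc,
 * and the code clips the delta against +/-tc_pos before adding it back, which
 * is equivalent):
 *   p0' = clip3(p0 - 2*tc, p0 + 2*tc, (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3)
 *   p1' = clip3(p1 - 2*tc, p1 + 2*tc, (p2 + p1 + p0 + q0 + 2) >> 2)
 *   p2' = clip3(p2 - 2*tc, p2 + 2*tc, (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3)
 * PCM-protected samples are restored by the __msa_bmz_v selects that follow. */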
temp0 = (p1_src + p0_src + q0_src); - - temp1 = ((p3_src + p2_src) << 1) + p2_src + temp0; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - p2_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst0 = (v16u8) (temp2 + (v8i16) p2_src); - - temp1 = temp0 + p2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 2); - temp2 = (v8i16) (temp1 - p1_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst1 = (v16u8) (temp2 + (v8i16) p1_src); - - temp1 = (temp0 << 1) + p2_src + q1_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - p0_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst2 = (v16u8) (temp2 + (v8i16) p0_src); - - dst0 = __msa_bmz_v(dst0, (v16u8) p2_src, (v16u8) p_is_pcm_vec); - dst1 = __msa_bmz_v(dst1, (v16u8) p1_src, (v16u8) p_is_pcm_vec); - dst2 = __msa_bmz_v(dst2, (v16u8) p0_src, (v16u8) p_is_pcm_vec); - - /* q part */ - temp0 = (q1_src + p0_src + q0_src); - temp1 = ((q3_src + q2_src) << 1) + q2_src + temp0; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - q2_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst5 = (v16u8) (temp2 + (v8i16) q2_src); - - temp1 = temp0 + q2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 2); - temp2 = (v8i16) (temp1 - q1_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst4 = (v16u8) (temp2 + (v8i16) q1_src); - - temp1 = (temp0 << 1) + p1_src + q2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - q0_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst3 = (v16u8) (temp2 + (v8i16) q0_src); - - dst3 = __msa_bmz_v(dst3, (v16u8) q0_src, (v16u8) q_is_pcm_vec); - dst4 = __msa_bmz_v(dst4, (v16u8) q1_src, (v16u8) q_is_pcm_vec); - dst5 = __msa_bmz_v(dst5, (v16u8) q2_src, (v16u8) q_is_pcm_vec); - /* strong filter ends */ - } else if (flag0 == flag1) { /* weak only */ - /* weak filter */ - tc_pos >>= 1; - tc_neg = -tc_pos; - - diff0 = (v8i16) (q0_src - p0_src); - diff1 = (v8i16) (q1_src - p1_src); - diff0 = (diff0 << 3) + diff0; - diff1 = (diff1 << 1) + diff1; - delta0 = diff0 - diff1; - delta0 = __msa_srari_h(delta0, 4); - - temp1 = (v8u16) ((tc_pos << 3) + (tc_pos << 1)); - abs_delta0 = __msa_add_a_h(delta0, (v8i16) zero); - abs_delta0 = (v8u16) abs_delta0 < temp1; - - CLIP_SH(delta0, tc_neg, tc_pos); - temp2 = (v8i16) (delta0 + p0_src); - CLIP_SH_0_255(temp2); - temp0 = (v8u16) __msa_bmz_v((v16u8) temp2, (v16u8) p0_src, - (v16u8) p_is_pcm_vec); - - temp2 = (v8i16) (q0_src - delta0); - CLIP_SH_0_255(temp2); - temp2 = (v8i16) __msa_bmz_v((v16u8) temp2, (v16u8) q0_src, - (v16u8) q_is_pcm_vec); - - tmp = ((beta + (beta >> 1)) >> 3); - cmp0 = __msa_fill_d(!p_is_pcm0 && ((dp00 + dp30) < tmp)); - cmp1 = __msa_fill_d(!p_is_pcm4 && ((dp04 + dp34) < tmp)); - p_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - p_is_pcm_vec = __msa_ceqi_d(p_is_pcm_vec, 0); - - cmp0 = (v2i64) __msa_fill_h((!q_is_pcm0) && (dq00 + dq30 < tmp)); - cmp1 = (v2i64) __msa_fill_h((!q_is_pcm4) && (dq04 + dq34 < tmp)); - q_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - q_is_pcm_vec = __msa_ceqi_d(q_is_pcm_vec, 0); - - tc_pos >>= 1; - tc_neg = -tc_pos; - - delta1 = (v8i16) __msa_aver_u_h(p2_src, p0_src); - delta1 -= (v8i16) p1_src; - delta1 += delta0; - delta1 >>= 1; - CLIP_SH(delta1, tc_neg, tc_pos); - delta1 = (v8i16) p1_src + (v8i16) delta1; - CLIP_SH_0_255(delta1); - delta1 = (v8i16) __msa_bmnz_v((v16u8) delta1, (v16u8) p1_src, - (v16u8) p_is_pcm_vec); - - delta2 = (v8i16) __msa_aver_u_h(q0_src, q2_src); - delta2 = delta2 - (v8i16) q1_src; - delta2 = delta2 - delta0; - delta2 = delta2 >> 1; - CLIP_SH(delta2, tc_neg, tc_pos); - delta2 
= (v8i16) q1_src + (v8i16) delta2; - CLIP_SH_0_255(delta2); - delta2 = (v8i16) __msa_bmnz_v((v16u8) delta2, (v16u8) q1_src, - (v16u8) q_is_pcm_vec); - - dst0 = __msa_bmz_v((v16u8) delta1, (v16u8) p1_src, - (v16u8) abs_delta0); - dst1 = __msa_bmz_v((v16u8) temp0, (v16u8) p0_src, - (v16u8) abs_delta0); - dst2 = __msa_bmz_v((v16u8) temp2, (v16u8) q0_src, - (v16u8) abs_delta0); - dst3 = __msa_bmz_v((v16u8) delta2, (v16u8) q1_src, - (v16u8) abs_delta0); - /* weak filter ends */ - - dst0 = __msa_bmz_v(dst0, (v16u8) p1_src, (v16u8) cmp3); - dst1 = __msa_bmz_v(dst1, (v16u8) p0_src, (v16u8) cmp3); - dst2 = __msa_bmz_v(dst2, (v16u8) q0_src, (v16u8) cmp3); - dst3 = __msa_bmz_v(dst3, (v16u8) q1_src, (v16u8) cmp3); - - PCKEV_B2_UB(dst2, dst0, dst3, dst1, dst0, dst1); - - /* transpose */ - ILVRL_B2_UB(dst1, dst0, dst4, dst5); - ILVRL_H2_UB(dst5, dst4, dst0, dst1); - - src += 2; - - tmp2 = __msa_copy_u_w((v4i32) dst0, 0); - tmp3 = __msa_copy_u_w((v4i32) dst0, 1); - SW(tmp2, src); - src += stride; - SW(tmp3, src); - src += stride; - - tmp2 = __msa_copy_u_w((v4i32) dst0, 2); - tmp3 = __msa_copy_u_w((v4i32) dst0, 3); - SW(tmp2, src); - src += stride; - SW(tmp3, src); - src += stride; - - tmp2 = __msa_copy_u_w((v4i32) dst1, 0); - tmp3 = __msa_copy_u_w((v4i32) dst1, 1); - SW(tmp2, src); - src += stride; - SW(tmp3, src); - src += stride; - - tmp2 = __msa_copy_u_w((v4i32) dst1, 2); - tmp3 = __msa_copy_u_w((v4i32) dst1, 3); - SW(tmp2, src); - src += stride; - SW(tmp3, src); - - return; - } else { /* strong + weak */ - /* strong filter */ - tc_neg = -tc_pos; - - /* p part */ - temp0 = (p1_src + p0_src + q0_src); - - temp1 = ((p3_src + p2_src) << 1) + p2_src + temp0; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - p2_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst0 = (v16u8) (temp2 + (v8i16) p2_src); - - temp1 = temp0 + p2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 2); - temp2 = (v8i16) (temp1 - p1_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst1 = (v16u8) (temp2 + (v8i16) p1_src); - - temp1 = (temp0 << 1) + p2_src + q1_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - p0_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst2 = (v16u8) (temp2 + (v8i16) p0_src); - - dst0 = __msa_bmz_v(dst0, (v16u8) p2_src, (v16u8) p_is_pcm_vec); - dst1 = __msa_bmz_v(dst1, (v16u8) p1_src, (v16u8) p_is_pcm_vec); - dst2 = __msa_bmz_v(dst2, (v16u8) p0_src, (v16u8) p_is_pcm_vec); - - /* q part */ - temp0 = (q1_src + p0_src + q0_src); - temp1 = ((q3_src + q2_src) << 1) + q2_src + temp0; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - q2_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst5 = (v16u8) (temp2 + (v8i16) q2_src); - - temp1 = temp0 + q2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 2); - temp2 = (v8i16) (temp1 - q1_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst4 = (v16u8) (temp2 + (v8i16) q1_src); - - temp1 = (temp0 << 1) + p1_src + q2_src; - temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3); - temp2 = (v8i16) (temp1 - q0_src); - CLIP_SH(temp2, tc_neg, tc_pos); - dst3 = (v16u8) (temp2 + (v8i16) q0_src); - - dst3 = __msa_bmz_v(dst3, (v16u8) q0_src, (v16u8) q_is_pcm_vec); - dst4 = __msa_bmz_v(dst4, (v16u8) q1_src, (v16u8) q_is_pcm_vec); - dst5 = __msa_bmz_v(dst5, (v16u8) q2_src, (v16u8) q_is_pcm_vec); - /* strong filter ends */ - - /* weak filter */ - tc_pos >>= 1; - tc_neg = -tc_pos; - - diff0 = (v8i16) (q0_src - p0_src); - diff1 = (v8i16) (q1_src - p1_src); - diff0 = (diff0 << 3) + diff0; - diff1 = (diff1 << 1) + diff1; - delta0 = diff0 - 
diff1; - delta0 = __msa_srari_h(delta0, 4); - - temp1 = (v8u16) ((tc_pos << 3) + (tc_pos << 1)); - abs_delta0 = __msa_add_a_h(delta0, (v8i16) zero); - abs_delta0 = (v8u16) abs_delta0 < temp1; - - CLIP_SH(delta0, tc_neg, tc_pos); - - temp2 = (v8i16) (delta0 + p0_src); - CLIP_SH_0_255(temp2); - temp0 = (v8u16) __msa_bmz_v((v16u8) temp2, (v16u8) p0_src, - (v16u8) p_is_pcm_vec); - - temp2 = (v8i16) (q0_src - delta0); - CLIP_SH_0_255(temp2); - temp2 = (v8i16) __msa_bmz_v((v16u8) temp2, (v16u8) q0_src, - (v16u8) q_is_pcm_vec); - - tmp = (beta + (beta >> 1)) >> 3; - cmp0 = __msa_fill_d(!p_is_pcm0 && ((dp00 + dp30) < tmp)); - cmp1 = __msa_fill_d(!p_is_pcm4 && ((dp04 + dp34) < tmp)); - p_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - p_is_pcm_vec = __msa_ceqi_d(p_is_pcm_vec, 0); - - cmp0 = (v2i64) __msa_fill_h((!q_is_pcm0) && (dq00 + dq30 < tmp)); - cmp1 = (v2i64) __msa_fill_h((!q_is_pcm4) && (dq04 + dq34 < tmp)); - q_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - q_is_pcm_vec = __msa_ceqi_d(q_is_pcm_vec, 0); - - tc_pos >>= 1; - tc_neg = -tc_pos; - - delta1 = (v8i16) __msa_aver_u_h(p2_src, p0_src); - delta1 -= (v8i16) p1_src; - delta1 += delta0; - delta1 >>= 1; - CLIP_SH(delta1, tc_neg, tc_pos); - delta1 = (v8i16) p1_src + (v8i16) delta1; - CLIP_SH_0_255(delta1); - delta1 = (v8i16) __msa_bmnz_v((v16u8) delta1, (v16u8) p1_src, - (v16u8) p_is_pcm_vec); - - delta2 = (v8i16) __msa_aver_u_h(q0_src, q2_src); - delta2 = delta2 - (v8i16) q1_src; - delta2 = delta2 - delta0; - delta2 = delta2 >> 1; - CLIP_SH(delta2, tc_neg, tc_pos); - delta2 = (v8i16) q1_src + (v8i16) delta2; - CLIP_SH_0_255(delta2); - delta2 = (v8i16) __msa_bmnz_v((v16u8) delta2, (v16u8) q1_src, - (v16u8) q_is_pcm_vec); - delta1 = (v8i16) __msa_bmz_v((v16u8) delta1, (v16u8) p1_src, - (v16u8) abs_delta0); - temp0 = (v8u16) __msa_bmz_v((v16u8) temp0, (v16u8) p0_src, - (v16u8) abs_delta0); - temp2 = (v8i16) __msa_bmz_v((v16u8) temp2, (v16u8) q0_src, - (v16u8) abs_delta0); - delta2 = (v8i16) __msa_bmz_v((v16u8) delta2, (v16u8) q1_src, - (v16u8) abs_delta0); - /* weak filter ends*/ - - /* select between weak or strong */ - dst2 = __msa_bmnz_v(dst2, (v16u8) temp0, (v16u8) cmp2); - dst3 = __msa_bmnz_v(dst3, (v16u8) temp2, (v16u8) cmp2); - dst1 = __msa_bmnz_v(dst1, (v16u8) delta1, (v16u8) cmp2); - dst4 = __msa_bmnz_v(dst4, (v16u8) delta2, (v16u8) cmp2); - dst0 = __msa_bmnz_v(dst0, (v16u8) p2_src, (v16u8) cmp2); - dst5 = __msa_bmnz_v(dst5, (v16u8) q2_src, (v16u8) cmp2); - } - - dst0 = __msa_bmz_v(dst0, (v16u8) p2_src, (v16u8) cmp3); - dst1 = __msa_bmz_v(dst1, (v16u8) p1_src, (v16u8) cmp3); - dst2 = __msa_bmz_v(dst2, (v16u8) p0_src, (v16u8) cmp3); - dst3 = __msa_bmz_v(dst3, (v16u8) q0_src, (v16u8) cmp3); - dst4 = __msa_bmz_v(dst4, (v16u8) q1_src, (v16u8) cmp3); - dst5 = __msa_bmz_v(dst5, (v16u8) q2_src, (v16u8) cmp3); - - /* pack results to 8 bit */ - PCKEV_B4_UB(dst2, dst0, dst3, dst1, dst4, dst4, dst5, dst5, dst0, dst1, - dst2, dst3); - - /* transpose */ - ILVRL_B2_UB(dst1, dst0, dst4, dst5); - ILVRL_B2_UB(dst3, dst2, dst6, dst7); - ILVRL_H2_UB(dst5, dst4, dst0, dst1); - ILVRL_H2_UB(dst7, dst6, dst2, dst3); - - src += 1; - - tmp2 = __msa_copy_u_w((v4i32) dst0, 0); - tmp3 = __msa_copy_u_w((v4i32) dst0, 1); - tmp0 = __msa_copy_u_h((v8i16) dst2, 0); - tmp1 = __msa_copy_u_h((v8i16) dst2, 2); - SW(tmp2, src); - SH(tmp0, src + 4); - src += stride; - SW(tmp3, src); - SH(tmp1, src + 4); - src += stride; - - tmp2 = __msa_copy_u_w((v4i32) dst0, 2); - tmp3 = __msa_copy_u_w((v4i32) dst0, 3); - tmp0 = __msa_copy_u_h((v8i16) dst2, 4); - tmp1 = __msa_copy_u_h((v8i16) dst2, 
6); - SW(tmp2, src); - SH(tmp0, src + 4); - src += stride; - SW(tmp3, src); - SH(tmp1, src + 4); - src += stride; - - tmp2 = __msa_copy_u_w((v4i32) dst1, 0); - tmp3 = __msa_copy_u_w((v4i32) dst1, 1); - tmp0 = __msa_copy_u_h((v8i16) dst3, 0); - tmp1 = __msa_copy_u_h((v8i16) dst3, 2); - SW(tmp2, src); - SH(tmp0, src + 4); - src += stride; - SW(tmp3, src); - SH(tmp1, src + 4); - src += stride; - - tmp2 = __msa_copy_u_w((v4i32) dst1, 2); - tmp3 = __msa_copy_u_w((v4i32) dst1, 3); - tmp0 = __msa_copy_u_h((v8i16) dst3, 4); - tmp1 = __msa_copy_u_h((v8i16) dst3, 6); - SW(tmp2, src); - SH(tmp0, src + 4); - src += stride; - SW(tmp3, src); - SH(tmp1, src + 4); - } -} - -static void hevc_loopfilter_chroma_hor_msa(uint8_t *src, int32_t stride, - const int32_t *tc, const uint8_t *p_is_pcm, - const uint8_t *q_is_pcm) -{ - uint8_t *p1_ptr = src - (stride << 1); - uint8_t *p0_ptr = src - stride; - uint8_t *q0_ptr = src; - uint8_t *q1_ptr = src + stride; - v2i64 cmp0, cmp1, p_is_pcm_vec, q_is_pcm_vec; - v8u16 p1, p0, q0, q1; - v8i16 tc_pos, tc_neg; - v16i8 zero = { 0 }; - v8i16 temp0, temp1, delta; - - if (!(tc[0] <= 0) || !(tc[1] <= 0)) { - cmp0 = (v2i64) __msa_fill_h(tc[0]); - cmp1 = (v2i64) __msa_fill_h(tc[1]); - tc_pos = (v8i16) __msa_ilvev_d(cmp1, cmp0); - tc_neg = -tc_pos; - - cmp0 = __msa_fill_d(p_is_pcm[0]); - cmp1 = __msa_fill_d(p_is_pcm[1]); - p_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - p_is_pcm_vec = __msa_ceqi_d(p_is_pcm_vec, 0); - - cmp0 = __msa_fill_d(q_is_pcm[0]); - cmp1 = __msa_fill_d(q_is_pcm[1]); - q_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - q_is_pcm_vec = __msa_ceqi_d(q_is_pcm_vec, 0); - - p1 = LD_UH(p1_ptr); - p0 = LD_UH(p0_ptr); - q0 = LD_UH(q0_ptr); - q1 = LD_UH(q1_ptr); - - ILVR_B4_UH(zero, p1, zero, p0, zero, q0, zero, q1, p1, p0, q0, q1); - - temp0 = (v8i16) (q0 - p0); - temp1 = (v8i16) (p1 - q1); - temp0 <<= 2; - temp0 += temp1; - delta = __msa_srari_h((v8i16) temp0, 3); - CLIP_SH(delta, tc_neg, tc_pos); - - temp0 = (v8i16) ((v8i16) p0 + delta); - CLIP_SH_0_255(temp0); - temp0 = (v8i16) __msa_bmz_v((v16u8) temp0, (v16u8) p0, - (v16u8) p_is_pcm_vec); - - temp1 = (v8i16) ((v8i16) q0 - delta); - CLIP_SH_0_255(temp1); - temp1 = (v8i16) __msa_bmz_v((v16u8) temp1, (v16u8) q0, - (v16u8) q_is_pcm_vec); - - tc_pos = (v8i16) __msa_clei_s_d((v2i64) tc_pos, 0); - temp0 = (v8i16) __msa_bmnz_v((v16u8) temp0, (v16u8) p0, (v16u8) tc_pos); - temp1 = (v8i16) __msa_bmnz_v((v16u8) temp1, (v16u8) q0, (v16u8) tc_pos); - - temp0 = (v8i16) __msa_pckev_b((v16i8) temp1, (v16i8) temp0); - ST_D2(temp0, 0, 1, p0_ptr, stride); - } -} - -static void hevc_loopfilter_chroma_ver_msa(uint8_t *src, int32_t stride, - const int32_t *tc, const uint8_t *p_is_pcm, - const uint8_t *q_is_pcm) -{ - v2i64 cmp0, cmp1, p_is_pcm_vec, q_is_pcm_vec; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7; - v8u16 p1, p0, q0, q1; - v8i16 tc_pos, tc_neg; - v16i8 zero = { 0 }; - v8i16 temp0, temp1, delta; - - if (!(tc[0] <= 0) || !(tc[1] <= 0)) { - cmp0 = (v2i64) __msa_fill_h(tc[0]); - cmp1 = (v2i64) __msa_fill_h(tc[1]); - tc_pos = (v8i16) __msa_ilvev_d(cmp1, cmp0); - tc_neg = -tc_pos; - - cmp0 = __msa_fill_d(p_is_pcm[0]); - cmp1 = __msa_fill_d(p_is_pcm[1]); - p_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - p_is_pcm_vec = __msa_ceqi_d(p_is_pcm_vec, 0); - - cmp0 = __msa_fill_d(q_is_pcm[0]); - cmp1 = __msa_fill_d(q_is_pcm[1]); - q_is_pcm_vec = __msa_ilvev_d(cmp1, cmp0); - q_is_pcm_vec = __msa_ceqi_d(q_is_pcm_vec, 0); - - src -= 2; - LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7); - TRANSPOSE8x4_UB_UH(src0, src1, src2, 
src3, src4, src5, src6, src7, - p1, p0, q0, q1); - ILVR_B4_UH(zero, p1, zero, p0, zero, q0, zero, q1, p1, p0, q0, q1); - - temp0 = (v8i16) (q0 - p0); - temp1 = (v8i16) (p1 - q1); - temp0 <<= 2; - temp0 += temp1; - delta = __msa_srari_h((v8i16) temp0, 3); - CLIP_SH(delta, tc_neg, tc_pos); - - temp0 = (v8i16) ((v8i16) p0 + delta); - CLIP_SH_0_255(temp0); - temp0 = (v8i16) __msa_bmz_v((v16u8) temp0, (v16u8) p0, - (v16u8) p_is_pcm_vec); - - temp1 = (v8i16) ((v8i16) q0 - delta); - CLIP_SH_0_255(temp1); - temp1 = (v8i16) __msa_bmz_v((v16u8) temp1, (v16u8) q0, - (v16u8) q_is_pcm_vec); - - tc_pos = (v8i16) __msa_clei_s_d((v2i64) tc_pos, 0); - temp0 = (v8i16) __msa_bmnz_v((v16u8) temp0, (v16u8) p0, (v16u8) tc_pos); - temp1 = (v8i16) __msa_bmnz_v((v16u8) temp1, (v16u8) q0, (v16u8) tc_pos); - - temp0 = (v8i16) __msa_ilvev_b((v16i8) temp1, (v16i8) temp0); - - src += 1; - ST_H8(temp0, 0, 1, 2, 3, 4, 5, 6, 7, src, stride); - } -} - -static void hevc_sao_band_filter_4width_msa(uint8_t *dst, int32_t dst_stride, - const uint8_t *src, int32_t src_stride, - int32_t sao_left_class, - const int16_t *sao_offset_val, - int32_t height) -{ - v16u8 src0, src1, src2, src3; - v16i8 src0_r, src1_r; - v16i8 offset, offset_val, mask; - v16i8 dst0, offset0, offset1; - v16i8 zero = { 0 }; - - offset_val = LD_SB(sao_offset_val + 1); - offset_val = (v16i8) __msa_pckev_d((v2i64) offset_val, (v2i64) offset_val); - - offset_val = __msa_pckev_b(offset_val, offset_val); - offset1 = (v16i8) __msa_insve_w((v4i32) zero, 3, (v4i32) offset_val); - offset0 = __msa_sld_b(offset1, zero, 28 - ((sao_left_class) & 31)); - offset1 = __msa_sld_b(zero, offset1, 28 - ((sao_left_class) & 31)); - - /* load in advance. */ - LD_UB4(src, src_stride, src0, src1, src2, src3); - - if (!((sao_left_class > 12) & (sao_left_class < 29))) { - SWAP(offset0, offset1); - } - - for (height -= 4; height; height -= 4) { - src += (4 * src_stride); - - ILVEV_D2_SB(src0, src1, src2, src3, src0_r, src1_r); - - src0_r = (v16i8) __msa_pckev_w((v4i32) src1_r, (v4i32) src0_r); - mask = __msa_srli_b(src0_r, 3); - offset = __msa_vshf_b(mask, offset1, offset0); - - src0_r = (v16i8) __msa_xori_b((v16u8) src0_r, 128); - dst0 = __msa_adds_s_b(src0_r, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - /* load in advance. 
*/ - LD_UB4(src, src_stride, src0, src1, src2, src3); - - /* store results */ - ST_W4(dst0, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); - } - - ILVEV_D2_SB(src0, src1, src2, src3, src0_r, src1_r); - - src0_r = (v16i8) __msa_pckev_w((v4i32) src1_r, (v4i32) src0_r); - mask = __msa_srli_b(src0_r, 3); - offset = __msa_vshf_b(mask, offset1, offset0); - - src0_r = (v16i8) __msa_xori_b((v16u8) src0_r, 128); - dst0 = __msa_adds_s_b(src0_r, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - /* store results */ - ST_W4(dst0, 0, 1, 2, 3, dst, dst_stride); -} - -static void hevc_sao_band_filter_8width_msa(uint8_t *dst, int32_t dst_stride, - const uint8_t *src, int32_t src_stride, - int32_t sao_left_class, - const int16_t *sao_offset_val, - int32_t height) -{ - v16u8 src0, src1, src2, src3; - v16i8 src0_r, src1_r, mask0, mask1; - v16i8 offset_mask0, offset_mask1, offset_val; - v16i8 offset0, offset1, dst0, dst1; - v16i8 zero = { 0 }; - - offset_val = LD_SB(sao_offset_val + 1); - offset_val = (v16i8) __msa_pckev_d((v2i64) offset_val, (v2i64) offset_val); - offset_val = __msa_pckev_b(offset_val, offset_val); - offset1 = (v16i8) __msa_insve_w((v4i32) zero, 3, (v4i32) offset_val); - offset0 = __msa_sld_b(offset1, zero, 28 - ((sao_left_class) & 31)); - offset1 = __msa_sld_b(zero, offset1, 28 - ((sao_left_class) & 31)); - - /* load in advance. */ - LD_UB4(src, src_stride, src0, src1, src2, src3); - - if (!((sao_left_class > 12) & (sao_left_class < 29))) { - SWAP(offset0, offset1); - } - - for (height -= 4; height; height -= 4) { - src += src_stride << 2; - - ILVR_D2_SB(src1, src0, src3, src2, src0_r, src1_r); - - mask0 = __msa_srli_b(src0_r, 3); - mask1 = __msa_srli_b(src1_r, 3); - - offset_mask0 = __msa_vshf_b(mask0, offset1, offset0); - offset_mask1 = __msa_vshf_b(mask1, offset1, offset0); - - /* load in advance. 
*/ - LD_UB4(src, src_stride, src0, src1, src2, src3); - - XORI_B2_128_SB(src0_r, src1_r); - - dst0 = __msa_adds_s_b(src0_r, offset_mask0); - dst1 = __msa_adds_s_b(src1_r, offset_mask1); - - XORI_B2_128_SB(dst0, dst1); - - /* store results */ - ST_D4(dst0, dst1, 0, 1, 0, 1, dst, dst_stride); - dst += dst_stride << 2; - } - - ILVR_D2_SB(src1, src0, src3, src2, src0_r, src1_r); - - mask0 = __msa_srli_b(src0_r, 3); - mask1 = __msa_srli_b(src1_r, 3); - - offset_mask0 = __msa_vshf_b(mask0, offset1, offset0); - offset_mask1 = __msa_vshf_b(mask1, offset1, offset0); - - XORI_B2_128_SB(src0_r, src1_r); - - dst0 = __msa_adds_s_b(src0_r, offset_mask0); - dst1 = __msa_adds_s_b(src1_r, offset_mask1); - - XORI_B2_128_SB(dst0, dst1); - - /* store results */ - ST_D4(dst0, dst1, 0, 1, 0, 1, dst, dst_stride); -} - -static void hevc_sao_band_filter_16multiple_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - int32_t sao_left_class, - const int16_t *sao_offset_val, - int32_t width, int32_t height) -{ - int32_t w_cnt; - v16u8 src0, src1, src2, src3; - v16i8 out0, out1, out2, out3; - v16i8 mask0, mask1, mask2, mask3; - v16i8 tmp0, tmp1, tmp2, tmp3, offset_val; - v16i8 offset0, offset1; - v16i8 zero = { 0 }; - - offset_val = LD_SB(sao_offset_val + 1); - offset_val = (v16i8) __msa_pckev_d((v2i64) offset_val, (v2i64) offset_val); - offset_val = __msa_pckev_b(offset_val, offset_val); - offset1 = (v16i8) __msa_insve_w((v4i32) zero, 3, (v4i32) offset_val); - offset0 = __msa_sld_b(offset1, zero, 28 - ((sao_left_class) & 31)); - offset1 = __msa_sld_b(zero, offset1, 28 - ((sao_left_class) & 31)); - - if (!((sao_left_class > 12) & (sao_left_class < 29))) { - SWAP(offset0, offset1); - } - - while (height > 0) { - /* load in advance */ - LD_UB4(src, src_stride, src0, src1, src2, src3); - - for (w_cnt = 16; w_cnt < width; w_cnt += 16) { - mask0 = __msa_srli_b((v16i8) src0, 3); - mask1 = __msa_srli_b((v16i8) src1, 3); - mask2 = __msa_srli_b((v16i8) src2, 3); - mask3 = __msa_srli_b((v16i8) src3, 3); - - VSHF_B2_SB(offset0, offset1, offset0, offset1, mask0, mask1, - tmp0, tmp1); - VSHF_B2_SB(offset0, offset1, offset0, offset1, mask2, mask3, - tmp2, tmp3); - XORI_B4_128_UB(src0, src1, src2, src3); - - out0 = __msa_adds_s_b((v16i8) src0, tmp0); - out1 = __msa_adds_s_b((v16i8) src1, tmp1); - out2 = __msa_adds_s_b((v16i8) src2, tmp2); - out3 = __msa_adds_s_b((v16i8) src3, tmp3); - - /* load for next iteration */ - LD_UB4(src + w_cnt, src_stride, src0, src1, src2, src3); - - XORI_B4_128_SB(out0, out1, out2, out3); - - ST_SB4(out0, out1, out2, out3, dst + w_cnt - 16, dst_stride); - } - - mask0 = __msa_srli_b((v16i8) src0, 3); - mask1 = __msa_srli_b((v16i8) src1, 3); - mask2 = __msa_srli_b((v16i8) src2, 3); - mask3 = __msa_srli_b((v16i8) src3, 3); - - VSHF_B2_SB(offset0, offset1, offset0, offset1, mask0, mask1, tmp0, - tmp1); - VSHF_B2_SB(offset0, offset1, offset0, offset1, mask2, mask3, tmp2, - tmp3); - XORI_B4_128_UB(src0, src1, src2, src3); - - out0 = __msa_adds_s_b((v16i8) src0, tmp0); - out1 = __msa_adds_s_b((v16i8) src1, tmp1); - out2 = __msa_adds_s_b((v16i8) src2, tmp2); - out3 = __msa_adds_s_b((v16i8) src3, tmp3); - - XORI_B4_128_SB(out0, out1, out2, out3); - - ST_SB4(out0, out1, out2, out3, dst + w_cnt - 16, dst_stride); - - src += src_stride << 2; - dst += dst_stride << 2; - height -= 4; - } -} - -static void hevc_sao_edge_filter_0degree_4width_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - 
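/* Per-pixel rule implemented by this and the other SAO edge kernels (an
 * illustrative scalar sketch; the sgn() helper named below is hypothetical
 * and not part of this file):
 *
 *   static const uint8_t edge_idx[5] = { 1, 2, 0, 3, 4 };
 *   // sgn(a, b) = (a > b) - (a < b)
 *   cls = 2 + sgn(cur, left_neighbour) + sgn(cur, right_neighbour);
 *   dst = av_clip_uint8(cur + sao_offset_val[edge_idx[cls]]);
 *
 * The vector code packs sao_offset_val to bytes and uses edge_idx plus the
 * packed offsets as __msa_vshf_b look-up tables, while the
 * xori(128) / __msa_adds_s_b / xori(128) sequence performs the saturating
 * unsigned add that stands in for av_clip_uint8. */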
uint32_t dst_val0, dst_val1; - v16u8 cmp_minus10, diff_minus10, diff_minus11, src_minus10, src_minus11; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16i8 sao_offset = LD_SB(sao_offset_val); - v16i8 src_plus10, offset, src0, dst0; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16i8 zero = { 0 }; - - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - src -= 1; - - /* load in advance */ - LD_UB2(src, src_stride, src_minus10, src_minus11); - - for (height -= 2; height; height -= 2) { - src += (2 * src_stride); - - src_minus10 = (v16u8) __msa_pckev_d((v2i64) src_minus11, - (v2i64) src_minus10); - - src0 = (v16i8) __msa_sldi_b(zero, (v16i8) src_minus10, 1); - src_plus10 = (v16i8) __msa_sldi_b(zero, (v16i8) src_minus10, 2); - - cmp_minus10 = ((v16u8) src0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus10 = ((v16u8) src0 == (v16u8) src_plus10); - diff_minus11 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = ((v16u8) src_plus10 < (v16u8) src0); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus10); - - offset = (v16i8) diff_minus10 + (v16i8) diff_minus11 + 2; - - /* load in advance */ - LD_UB2(src, src_stride, src_minus10, src_minus11); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - src0 = (v16i8) __msa_xori_b((v16u8) src0, 128); - dst0 = __msa_adds_s_b(src0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - dst_val0 = __msa_copy_u_w((v4i32) dst0, 0); - dst_val1 = __msa_copy_u_w((v4i32) dst0, 2); - SW(dst_val0, dst); - dst += dst_stride; - SW(dst_val1, dst); - dst += dst_stride; - } - - src_minus10 = (v16u8) __msa_pckev_d((v2i64) src_minus11, - (v2i64) src_minus10); - - src0 = (v16i8) __msa_sldi_b(zero, (v16i8) src_minus10, 1); - src_plus10 = (v16i8) __msa_sldi_b(zero, (v16i8) src_minus10, 2); - - cmp_minus10 = ((v16u8) src0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus10 = ((v16u8) src0 == (v16u8) src_plus10); - diff_minus11 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = ((v16u8) src_plus10 < (v16u8) src0); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus10); - - offset = (v16i8) diff_minus10 + (v16i8) diff_minus11 + 2; - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - src0 = (v16i8) __msa_xori_b((v16u8) src0, 128); - dst0 = __msa_adds_s_b(src0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - dst_val0 = __msa_copy_u_w((v4i32) dst0, 0); - dst_val1 = __msa_copy_u_w((v4i32) dst0, 2); - - SW(dst_val0, dst); - dst += dst_stride; - SW(dst_val1, dst); -} - -static void hevc_sao_edge_filter_0degree_8width_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - uint64_t dst_val0, dst_val1; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16u8 cmp_minus10, diff_minus10, diff_minus11; - v16u8 src0, src1, dst0, src_minus10, src_minus11, src_plus10, src_plus11; - v16i8 offset, sao_offset = LD_SB(sao_offset_val); - v16i8 zeros = { 0 }; - - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - src -= 1; - - /* load in advance */ - LD_UB2(src, src_stride, src_minus10, 
src_minus11); - - for (height -= 2; height; height -= 2) { - src += (src_stride << 1); - - SLDI_B2_UB(zeros, src_minus10, zeros, src_minus11, 1, src0, src1); - SLDI_B2_UB(zeros, src_minus10, zeros, src_minus11, 2, src_plus10, src_plus11); - - PCKEV_D2_UB(src_minus11, src_minus10, src_plus11, src_plus10, - src_minus10, src_plus10); - src0 = (v16u8) __msa_pckev_d((v2i64) src1, (v2i64) src0); - - cmp_minus10 = (src0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < src0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus10 = (src0 == src_plus10); - diff_minus11 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_plus10 < src0); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus10); - - offset = (v16i8) diff_minus10 + (v16i8) diff_minus11 + 2; - - /* load in advance */ - LD_UB2(src, src_stride, src_minus10, src_minus11); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - src0 = __msa_xori_b(src0, 128); - dst0 = (v16u8) __msa_adds_s_b((v16i8) src0, offset); - dst0 = __msa_xori_b(dst0, 128); - - dst_val0 = __msa_copy_u_d((v2i64) dst0, 0); - dst_val1 = __msa_copy_u_d((v2i64) dst0, 1); - SD(dst_val0, dst); - dst += dst_stride; - SD(dst_val1, dst); - dst += dst_stride; - } - - SLDI_B2_UB(zeros, src_minus10, zeros, src_minus11, 1, src0, src1); - SLDI_B2_UB(zeros, src_minus10, zeros, src_minus11, 2, src_plus10, src_plus11); - - PCKEV_D2_UB(src_minus11, src_minus10, src_plus11, src_plus10, src_minus10, - src_plus10); - src0 = (v16u8) __msa_pckev_d((v2i64) src1, (v2i64) src0); - - cmp_minus10 = ((v16u8) src0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus10 = (src0 == src_plus10); - diff_minus11 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_plus10 < src0); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus10); - - offset = (v16i8) diff_minus10 + (v16i8) diff_minus11 + 2; - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - src0 = __msa_xori_b(src0, 128); - dst0 = (v16u8) __msa_adds_s_b((v16i8) src0, offset); - dst0 = __msa_xori_b(dst0, 128); - - dst_val0 = __msa_copy_u_d((v2i64) dst0, 0); - dst_val1 = __msa_copy_u_d((v2i64) dst0, 1); - SD(dst_val0, dst); - dst += dst_stride; - SD(dst_val1, dst); -} - -static void hevc_sao_edge_filter_0degree_16multiple_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t width, - int32_t height) -{ - const uint8_t *src_minus1; - uint8_t *dst_ptr; - int32_t v_cnt; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16i8 sao_offset; - v16u8 cmp_minus10, cmp_plus10, diff_minus10, diff_plus10, cmp_minus11; - v16u8 cmp_plus11, diff_minus11, diff_plus11, cmp_minus12, cmp_plus12; - v16u8 diff_minus12, diff_plus12, cmp_minus13, cmp_plus13, diff_minus13; - v16u8 diff_plus13; - v16u8 src10, src11, src12, src13, dst0, dst1, dst2, dst3; - v16u8 src_minus10, src_minus11, src_minus12, src_minus13; - v16i8 offset_mask0, offset_mask1, offset_mask2, offset_mask3; - v16i8 src_zero0, src_zero1, src_zero2, src_zero3; - v16i8 src_plus10, src_plus11, src_plus12, src_plus13; - - sao_offset = LD_SB(sao_offset_val); - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - - for (; height; 
height -= 4) { - src_minus1 = src - 1; - LD_UB4(src_minus1, src_stride, - src_minus10, src_minus11, src_minus12, src_minus13); - - for (v_cnt = 0; v_cnt < width; v_cnt += 16) { - src_minus1 += 16; - dst_ptr = dst + v_cnt; - LD_UB4(src_minus1, src_stride, src10, src11, src12, src13); - - SLDI_B4_SB(src10, src_minus10, src11, src_minus11, - src12, src_minus12, src13, src_minus13, 1, - src_zero0, src_zero1, src_zero2, src_zero3); - SLDI_B4_SB(src10, src_minus10, src11, src_minus11, - src12, src_minus12, src13, src_minus13, 2, - src_plus10, src_plus11, src_plus12, src_plus13); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - cmp_plus10 = ((v16u8) src_zero0 == (v16u8) src_plus10); - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - cmp_plus11 = ((v16u8) src_zero1 == (v16u8) src_plus11); - cmp_minus12 = ((v16u8) src_zero2 == src_minus12); - cmp_plus12 = ((v16u8) src_zero2 == (v16u8) src_plus12); - cmp_minus13 = ((v16u8) src_zero3 == src_minus13); - cmp_plus13 = ((v16u8) src_zero3 == (v16u8) src_plus13); - - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - diff_plus10 = __msa_nor_v(cmp_plus10, cmp_plus10); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - diff_plus11 = __msa_nor_v(cmp_plus11, cmp_plus11); - diff_minus12 = __msa_nor_v(cmp_minus12, cmp_minus12); - diff_plus12 = __msa_nor_v(cmp_plus12, cmp_plus12); - diff_minus13 = __msa_nor_v(cmp_minus13, cmp_minus13); - diff_plus13 = __msa_nor_v(cmp_plus13, cmp_plus13); - - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - cmp_plus10 = ((v16u8) src_plus10 < (v16u8) src_zero0); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - cmp_plus11 = ((v16u8) src_plus11 < (v16u8) src_zero1); - cmp_minus12 = (src_minus12 < (v16u8) src_zero2); - cmp_plus12 = ((v16u8) src_plus12 < (v16u8) src_zero2); - cmp_minus13 = (src_minus13 < (v16u8) src_zero3); - cmp_plus13 = ((v16u8) src_plus13 < (v16u8) src_zero3); - - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - diff_plus10 = __msa_bmnz_v(diff_plus10, const1, cmp_plus10); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - diff_plus11 = __msa_bmnz_v(diff_plus11, const1, cmp_plus11); - diff_minus12 = __msa_bmnz_v(diff_minus12, const1, cmp_minus12); - diff_plus12 = __msa_bmnz_v(diff_plus12, const1, cmp_plus12); - diff_minus13 = __msa_bmnz_v(diff_minus13, const1, cmp_minus13); - diff_plus13 = __msa_bmnz_v(diff_plus13, const1, cmp_plus13); - - offset_mask0 = 2 + (v16i8) diff_minus10 + (v16i8) diff_plus10; - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask0, - offset_mask0, offset_mask0, offset_mask0); - offset_mask1 = 2 + (v16i8) diff_minus11 + (v16i8) diff_plus11; - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask1, - offset_mask1, offset_mask1, offset_mask1); - offset_mask2 = 2 + (v16i8) diff_minus12 + (v16i8) diff_plus12; - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask2, - offset_mask2, offset_mask2, offset_mask2); - offset_mask3 = 2 + (v16i8) diff_minus13 + (v16i8) diff_plus13; - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask3, - offset_mask3, offset_mask3, offset_mask3); - - XORI_B4_128_SB(src_zero0, src_zero1, src_zero2, src_zero3); - - dst0 = (v16u8) __msa_adds_s_b((v16i8) src_zero0, offset_mask0); - dst1 = (v16u8) __msa_adds_s_b((v16i8) src_zero1, offset_mask1); - dst2 = (v16u8) __msa_adds_s_b((v16i8) src_zero2, offset_mask2); - dst3 = (v16u8) __msa_adds_s_b((v16i8) src_zero3, offset_mask3); - - XORI_B4_128_UB(dst0, dst1, dst2, dst3); - - src_minus10 = src10; - ST_UB(dst0, 
dst_ptr); - src_minus11 = src11; - ST_UB(dst1, dst_ptr + dst_stride); - src_minus12 = src12; - ST_UB(dst2, dst_ptr + (dst_stride << 1)); - src_minus13 = src13; - ST_UB(dst3, dst_ptr + (dst_stride * 3)); - } - - src += (src_stride << 2); - dst += (dst_stride << 2); - } -} - -static void hevc_sao_edge_filter_90degree_4width_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - uint32_t dst_val0, dst_val1; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16i8 dst0; - v16i8 sao_offset = LD_SB(sao_offset_val); - v16u8 cmp_minus10, diff_minus10, cmp_minus11, diff_minus11; - v16u8 src_minus10, src_minus11, src10, src11; - v16i8 src_zero0, src_zero1; - v16i8 offset; - v8i16 offset_mask0, offset_mask1; - - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - - /* load in advance */ - LD_UB2(src - src_stride, src_stride, src_minus10, src_minus11); - LD_UB2(src + src_stride, src_stride, src10, src11); - - for (height -= 2; height; height -= 2) { - src += (src_stride << 1); - - src_minus10 = (v16u8) __msa_ilvr_b((v16i8) src10, (v16i8) src_minus10); - src_zero0 = __msa_ilvr_b((v16i8) src_minus11, (v16i8) src_minus11); - src_minus11 = (v16u8) __msa_ilvr_b((v16i8) src11, (v16i8) src_minus11); - src_zero1 = __msa_ilvr_b((v16i8) src10, (v16i8) src10); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - LD_UB2(src + src_stride, src_stride, src10, src11); - - dst_val0 = __msa_copy_u_w((v4i32) dst0, 0); - dst_val1 = __msa_copy_u_w((v4i32) dst0, 2); - SW(dst_val0, dst); - dst += dst_stride; - SW(dst_val1, dst); - - dst += dst_stride; - } - - src_minus10 = (v16u8) __msa_ilvr_b((v16i8) src10, (v16i8) src_minus10); - src_zero0 = __msa_ilvr_b((v16i8) src_minus11, (v16i8) src_minus11); - src_minus11 = (v16u8) __msa_ilvr_b((v16i8) src11, (v16i8) src_minus11); - src_zero1 = __msa_ilvr_b((v16i8) src10, (v16i8) src10); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2); - offset_mask1 = (v8i16) 
(__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, - offset, offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - dst_val0 = __msa_copy_u_w((v4i32) dst0, 0); - dst_val1 = __msa_copy_u_w((v4i32) dst0, 2); - SW(dst_val0, dst); - dst += dst_stride; - SW(dst_val1, dst); -} - -static void hevc_sao_edge_filter_90degree_8width_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - uint64_t dst_val0, dst_val1; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16i8 offset, sao_offset = LD_SB(sao_offset_val); - v16i8 src_zero0, src_zero1, dst0; - v16u8 cmp_minus10, diff_minus10, cmp_minus11, diff_minus11; - v16u8 src_minus10, src_minus11, src10, src11; - v8i16 offset_mask0, offset_mask1; - - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - - /* load in advance */ - LD_UB2(src - src_stride, src_stride, src_minus10, src_minus11); - LD_UB2(src + src_stride, src_stride, src10, src11); - - for (height -= 2; height; height -= 2) { - src += (src_stride << 1); - - src_minus10 = (v16u8) __msa_ilvr_b((v16i8) src10, (v16i8) src_minus10); - src_zero0 = __msa_ilvr_b((v16i8) src_minus11, (v16i8) src_minus11); - src_minus11 = (v16u8) __msa_ilvr_b((v16i8) src11, (v16i8) src_minus11); - src_zero1 = __msa_ilvr_b((v16i8) src10, (v16i8) src10); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, - offset, offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - LD_UB2(src + src_stride, src_stride, src10, src11); - - dst_val0 = __msa_copy_u_d((v2i64) dst0, 0); - dst_val1 = __msa_copy_u_d((v2i64) dst0, 1); - SD(dst_val0, dst); - dst += dst_stride; - SD(dst_val1, dst); - dst += dst_stride; - } - - src_minus10 = (v16u8) __msa_ilvr_b((v16i8) src10, (v16i8) src_minus10); - src_zero0 = __msa_ilvr_b((v16i8) src_minus11, (v16i8) src_minus11); - src_minus11 = (v16u8) __msa_ilvr_b((v16i8) src11, (v16i8) src_minus11); - src_zero1 = __msa_ilvr_b((v16i8) src10, (v16i8) src10); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - 
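/* As in the loop above: the equality compare just computed is all-ones where
 * the two samples match; the __msa_nor_v below turns that into 0xFF (-1 as a
 * signed byte) where they differ, and the __msa_bmnz_v with const1 then
 * overwrites it with +1 where the neighbour is the smaller sample, leaving
 * sign(cur - neighbour) in each byte lane. */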
diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - dst_val0 = __msa_copy_u_d((v2i64) dst0, 0); - dst_val1 = __msa_copy_u_d((v2i64) dst0, 1); - SD(dst_val0, dst); - dst += dst_stride; - SD(dst_val1, dst); -} - -static void hevc_sao_edge_filter_90degree_16multiple_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t * - sao_offset_val, - int32_t width, - int32_t height) -{ - const uint8_t *src_orig = src; - uint8_t *dst_orig = dst; - int32_t h_cnt, v_cnt; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16u8 cmp_minus10, cmp_plus10, diff_minus10, diff_plus10, cmp_minus11; - v16u8 cmp_plus11, diff_minus11, diff_plus11, cmp_minus12, cmp_plus12; - v16u8 diff_minus12, diff_plus12, cmp_minus13, cmp_plus13, diff_minus13; - v16u8 diff_plus13; - v16u8 src10, src_minus10, dst0, src11, src_minus11, dst1; - v16u8 src12, dst2, src13, dst3; - v16i8 offset_mask0, offset_mask1, offset_mask2, offset_mask3, sao_offset; - - sao_offset = LD_SB(sao_offset_val); - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - - for (v_cnt = 0; v_cnt < width; v_cnt += 16) { - src = src_orig + v_cnt; - dst = dst_orig + v_cnt; - - LD_UB2(src - src_stride, src_stride, src_minus10, src_minus11); - - for (h_cnt = (height >> 2); h_cnt--;) { - LD_UB4(src + src_stride, src_stride, src10, src11, src12, src13); - - cmp_minus10 = (src_minus11 == src_minus10); - cmp_plus10 = (src_minus11 == src10); - cmp_minus11 = (src10 == src_minus11); - cmp_plus11 = (src10 == src11); - cmp_minus12 = (src11 == src10); - cmp_plus12 = (src11 == src12); - cmp_minus13 = (src12 == src11); - cmp_plus13 = (src12 == src13); - - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - diff_plus10 = __msa_nor_v(cmp_plus10, cmp_plus10); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - diff_plus11 = __msa_nor_v(cmp_plus11, cmp_plus11); - diff_minus12 = __msa_nor_v(cmp_minus12, cmp_minus12); - diff_plus12 = __msa_nor_v(cmp_plus12, cmp_plus12); - diff_minus13 = __msa_nor_v(cmp_minus13, cmp_minus13); - diff_plus13 = __msa_nor_v(cmp_plus13, cmp_plus13); - - cmp_minus10 = (src_minus10 < src_minus11); - cmp_plus10 = (src10 < src_minus11); - cmp_minus11 = (src_minus11 < src10); - cmp_plus11 = (src11 < src10); - cmp_minus12 = (src10 < src11); - cmp_plus12 = (src12 < src11); - cmp_minus13 = (src11 < src12); - cmp_plus13 = (src13 < src12); - - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - diff_plus10 = __msa_bmnz_v(diff_plus10, const1, cmp_plus10); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - diff_plus11 = __msa_bmnz_v(diff_plus11, const1, cmp_plus11); - diff_minus12 = __msa_bmnz_v(diff_minus12, const1, cmp_minus12); - diff_plus12 = __msa_bmnz_v(diff_plus12, const1, cmp_plus12); - diff_minus13 = __msa_bmnz_v(diff_minus13, const1, cmp_minus13); - diff_plus13 = 
__msa_bmnz_v(diff_plus13, const1, cmp_plus13); - - offset_mask0 = 2 + (v16i8) diff_minus10 + (v16i8) diff_plus10; - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask0, offset_mask0, offset_mask0, offset_mask0); - offset_mask1 = 2 + (v16i8) diff_minus11 + (v16i8) diff_plus11; - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask1, offset_mask1, offset_mask1, offset_mask1); - offset_mask2 = 2 + (v16i8) diff_minus12 + (v16i8) diff_plus12; - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask2, offset_mask2, offset_mask2, offset_mask2); - offset_mask3 = 2 + (v16i8) diff_minus13 + (v16i8) diff_plus13; - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask3, offset_mask3, offset_mask3, offset_mask3); - - src_minus10 = src12; - XORI_B4_128_UB(src_minus11, src10, src11, src12); - - dst0 = (v16u8) __msa_adds_s_b((v16i8) src_minus11, offset_mask0); - dst1 = (v16u8) __msa_adds_s_b((v16i8) src10, offset_mask1); - dst2 = (v16u8) __msa_adds_s_b((v16i8) src11, offset_mask2); - dst3 = (v16u8) __msa_adds_s_b((v16i8) src12, offset_mask3); - - XORI_B4_128_UB(dst0, dst1, dst2, dst3); - src_minus11 = src13; - - ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride); - - src += (src_stride << 2); - dst += (dst_stride << 2); - } - } -} - -static void hevc_sao_edge_filter_45degree_4width_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const uint8_t *src_orig; - uint32_t dst_val0, dst_val1; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16i8 offset, sao_offset = LD_SB(sao_offset_val); - v16u8 cmp_minus10, diff_minus10, src_minus10, cmp_minus11, diff_minus11; - v16u8 src_minus11, src10, src11; - v16i8 src_plus0, src_zero0, src_plus1, src_zero1, dst0; - v8i16 offset_mask0, offset_mask1; - v16i8 zeros = { 0 }; - - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - - src_orig = src - 1; - - /* load in advance */ - LD_UB2(src_orig - src_stride, src_stride, src_minus10, src_minus11); - LD_UB2(src_orig + src_stride, src_stride, src10, src11); - - for (height -= 2; height; height -= 2) { - src_orig += (src_stride << 1); - - SLDI_B2_SB(zeros, src_minus11, zeros, src10, 1, src_zero0, src_zero1); - SLDI_B2_SB(zeros, src10, zeros, src11, 2, src_plus0, src_plus1); - - ILVR_B2_UB(src_plus0, src_minus10, src_plus1, src_minus11, src_minus10, - src_minus11); - ILVR_B2_SB(src_zero0, src_zero0, src_zero1, src_zero1, src_zero0, - src_zero1); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, - offset, offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - src_minus10 
= src10; - src_minus11 = src11; - - /* load in advance */ - LD_UB2(src_orig + src_stride, src_stride, src10, src11); - - dst_val0 = __msa_copy_u_w((v4i32) dst0, 0); - dst_val1 = __msa_copy_u_w((v4i32) dst0, 2); - SW(dst_val0, dst); - dst += dst_stride; - SW(dst_val1, dst); - - dst += dst_stride; - } - - SLDI_B2_SB(zeros, src_minus11, zeros, src10, 1, src_zero0, src_zero1); - SLDI_B2_SB(zeros, src10, zeros, src11, 2, src_plus0, src_plus1); - - ILVR_B2_UB(src_plus0, src_minus10, src_plus1, src_minus11, src_minus10, - src_minus11); - ILVR_B2_SB(src_zero0, src_zero0, src_zero1, src_zero1, src_zero0, - src_zero1); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - dst_val0 = __msa_copy_u_w((v4i32) dst0, 0); - dst_val1 = __msa_copy_u_w((v4i32) dst0, 2); - SW(dst_val0, dst); - dst += dst_stride; - SW(dst_val1, dst); -} - -static void hevc_sao_edge_filter_45degree_8width_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const uint8_t *src_orig; - uint64_t dst_val0, dst_val1; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16i8 offset, sao_offset = LD_SB(sao_offset_val); - v16u8 cmp_minus10, diff_minus10, cmp_minus11, diff_minus11; - v16u8 src_minus10, src10, src_minus11, src11; - v16i8 src_zero0, src_plus10, src_zero1, src_plus11, dst0; - v8i16 offset_mask0, offset_mask1; - v16i8 zeros = { 0 }; - - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - src_orig = src - 1; - - /* load in advance */ - LD_UB2(src_orig - src_stride, src_stride, src_minus10, src_minus11); - LD_UB2(src_orig + src_stride, src_stride, src10, src11); - - for (height -= 2; height; height -= 2) { - src_orig += (src_stride << 1); - - SLDI_B2_SB(zeros, src_minus11, zeros, src10, 1, src_zero0, src_zero1); - SLDI_B2_SB(zeros, src10, zeros, src11, 2, src_plus10, src_plus11); - - ILVR_B2_UB(src_plus10, src_minus10, src_plus11, src_minus11, - src_minus10, src_minus11); - ILVR_B2_SB(src_zero0, src_zero0, src_zero1, src_zero1, - src_zero0, src_zero1); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 
2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - LD_UB2(src_orig + src_stride, src_stride, src10, src11); - - dst_val0 = __msa_copy_u_d((v2i64) dst0, 0); - dst_val1 = __msa_copy_u_d((v2i64) dst0, 1); - SD(dst_val0, dst); - dst += dst_stride; - SD(dst_val1, dst); - dst += dst_stride; - } - - SLDI_B2_SB(zeros, src_minus11, zeros, src10, 1, src_zero0, src_zero1); - SLDI_B2_SB(zeros, src10, zeros, src11, 2, src_plus10, src_plus11); - ILVR_B2_UB(src_plus10, src_minus10, src_plus11, src_minus11, src_minus10, - src_minus11); - ILVR_B2_SB(src_zero0, src_zero0, src_zero1, src_zero1, src_zero0, - src_zero1); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - LD_UB2(src_orig + src_stride, src_stride, src10, src11); - - dst_val0 = __msa_copy_u_d((v2i64) dst0, 0); - dst_val1 = __msa_copy_u_d((v2i64) dst0, 1); - SD(dst_val0, dst); - dst += dst_stride; - SD(dst_val1, dst); -} - -static void hevc_sao_edge_filter_45degree_16multiple_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t * - sao_offset_val, - int32_t width, - int32_t height) -{ - const uint8_t *src_orig = src; - uint8_t *dst_orig = dst; - int32_t v_cnt; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16u8 cmp_minus10, cmp_plus10, diff_minus10, diff_plus10, cmp_minus11; - v16u8 cmp_plus11, diff_minus11, diff_plus11, cmp_minus12, cmp_plus12; - v16u8 diff_minus12, diff_plus12, cmp_minus13, cmp_plus13, diff_minus13; - v16u8 diff_plus13, src_minus14, src_plus13; - v16i8 offset_mask0, offset_mask1, offset_mask2, offset_mask3; - v16u8 src10, src_minus10, dst0, src11, src_minus11, dst1; - v16u8 src12, src_minus12, dst2, src13, src_minus13, dst3; - v16i8 src_zero0, src_plus10, src_zero1, src_plus11, src_zero2, src_plus12; - v16i8 src_zero3, sao_offset; - - sao_offset = LD_SB(sao_offset_val); - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - - for (; height; height -= 4) { - src_orig = src - 1; - dst_orig = dst; - LD_UB4(src_orig, src_stride, src_minus11, src_minus12, src_minus13, - 
src_minus14); - - for (v_cnt = 0; v_cnt < width; v_cnt += 16) { - src_minus10 = LD_UB(src_orig - src_stride); - LD_UB4(src_orig + 16, src_stride, src10, src11, src12, src13); - src_plus13 = LD_UB(src + 1 + v_cnt + (src_stride << 2)); - src_orig += 16; - - SLDI_B4_SB(src10, src_minus11, src11, src_minus12, - src12, src_minus13, src13, src_minus14, 1, - src_zero0, src_zero1, src_zero2, src_zero3); - SLDI_B2_SB(src11, src_minus12, src12, src_minus13, 2, src_plus10, - src_plus11); - - src_plus12 = __msa_sldi_b((v16i8) src13, (v16i8) src_minus14, 2); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - cmp_plus10 = ((v16u8) src_zero0 == (v16u8) src_plus10); - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - cmp_plus11 = ((v16u8) src_zero1 == (v16u8) src_plus11); - cmp_minus12 = ((v16u8) src_zero2 == src_minus12); - cmp_plus12 = ((v16u8) src_zero2 == (v16u8) src_plus12); - cmp_minus13 = ((v16u8) src_zero3 == src_minus13); - cmp_plus13 = ((v16u8) src_zero3 == src_plus13); - - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - diff_plus10 = __msa_nor_v(cmp_plus10, cmp_plus10); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - diff_plus11 = __msa_nor_v(cmp_plus11, cmp_plus11); - diff_minus12 = __msa_nor_v(cmp_minus12, cmp_minus12); - diff_plus12 = __msa_nor_v(cmp_plus12, cmp_plus12); - diff_minus13 = __msa_nor_v(cmp_minus13, cmp_minus13); - diff_plus13 = __msa_nor_v(cmp_plus13, cmp_plus13); - - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - cmp_plus10 = ((v16u8) src_plus10 < (v16u8) src_zero0); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - cmp_plus11 = ((v16u8) src_plus11 < (v16u8) src_zero1); - cmp_minus12 = (src_minus12 < (v16u8) src_zero2); - cmp_plus12 = ((v16u8) src_plus12 < (v16u8) src_zero2); - cmp_minus13 = (src_minus13 < (v16u8) src_zero3); - cmp_plus13 = (src_plus13 < (v16u8) src_zero3); - - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - diff_plus10 = __msa_bmnz_v(diff_plus10, const1, cmp_plus10); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - diff_plus11 = __msa_bmnz_v(diff_plus11, const1, cmp_plus11); - diff_minus12 = __msa_bmnz_v(diff_minus12, const1, cmp_minus12); - diff_plus12 = __msa_bmnz_v(diff_plus12, const1, cmp_plus12); - diff_minus13 = __msa_bmnz_v(diff_minus13, const1, cmp_minus13); - diff_plus13 = __msa_bmnz_v(diff_plus13, const1, cmp_plus13); - - offset_mask0 = 2 + (v16i8) diff_minus10 + (v16i8) diff_plus10; - offset_mask1 = 2 + (v16i8) diff_minus11 + (v16i8) diff_plus11; - offset_mask2 = 2 + (v16i8) diff_minus12 + (v16i8) diff_plus12; - offset_mask3 = 2 + (v16i8) diff_minus13 + (v16i8) diff_plus13; - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask0, offset_mask0, offset_mask0, offset_mask0); - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask1, offset_mask1, offset_mask1, offset_mask1); - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask2, offset_mask2, offset_mask2, offset_mask2); - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask3, offset_mask3, offset_mask3, offset_mask3); - - XORI_B4_128_SB(src_zero0, src_zero1, src_zero2, src_zero3); - - dst0 = (v16u8) __msa_adds_s_b((v16i8) src_zero0, offset_mask0); - dst1 = (v16u8) __msa_adds_s_b((v16i8) src_zero1, offset_mask1); - dst2 = (v16u8) __msa_adds_s_b((v16i8) src_zero2, offset_mask2); - dst3 = (v16u8) __msa_adds_s_b((v16i8) src_zero3, offset_mask3); - - XORI_B4_128_UB(dst0, dst1, dst2, dst3); - - src_minus11 = src10; - src_minus12 = src11; - src_minus13 = src12; - src_minus14 
= src13; - - ST_UB4(dst0, dst1, dst2, dst3, dst_orig, dst_stride); - dst_orig += 16; - } - - src += (src_stride << 2); - dst += (dst_stride << 2); - } -} - -static void hevc_sao_edge_filter_135degree_4width_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const uint8_t *src_orig; - uint32_t dst_val0, dst_val1; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16i8 offset, sao_offset = LD_SB(sao_offset_val); - v16i8 src_zero0, src_zero1, dst0; - v16u8 cmp_minus10, diff_minus10, cmp_minus11, diff_minus11; - v16u8 src_minus10, src10, src_minus11, src11; - v8i16 offset_mask0, offset_mask1; - v16i8 zeros = { 0 }; - - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - src_orig = src - 1; - - /* load in advance */ - LD_UB2(src_orig - src_stride, src_stride, src_minus10, src_minus11); - LD_UB2(src_orig + src_stride, src_stride, src10, src11); - - for (height -= 2; height; height -= 2) { - src_orig += (src_stride << 1); - - SLDI_B2_SB(zeros, src_minus11, zeros, src10, 1, src_zero0, src_zero1); - SLDI_B2_UB(zeros, src_minus10, zeros, src_minus11, 2, src_minus10, src_minus11); - - ILVR_B2_UB(src10, src_minus10, src11, src_minus11, src_minus10, - src_minus11); - ILVR_B2_SB(src_zero0, src_zero0, src_zero1, src_zero1, src_zero0, - src_zero1); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - LD_UB2(src_orig + src_stride, src_stride, src10, src11); - - dst_val0 = __msa_copy_u_w((v4i32) dst0, 0); - dst_val1 = __msa_copy_u_w((v4i32) dst0, 2); - - SW(dst_val0, dst); - dst += dst_stride; - SW(dst_val1, dst); - - dst += dst_stride; - } - - SLDI_B2_SB(zeros, src_minus11, zeros, src10, 1, src_zero0, src_zero1); - SLDI_B2_UB(zeros, src_minus10, zeros, src_minus11, 2, src_minus10, src_minus11); - - ILVR_B2_UB(src10, src_minus10, src11, src_minus11, src_minus10, - src_minus11); - ILVR_B2_SB(src_zero0, src_zero0, src_zero1, src_zero1, src_zero0, - src_zero1); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 
2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - dst_val0 = __msa_copy_u_w((v4i32) dst0, 0); - dst_val1 = __msa_copy_u_w((v4i32) dst0, 2); - - SW(dst_val0, dst); - dst += dst_stride; - SW(dst_val1, dst); - dst += dst_stride; -} - -static void hevc_sao_edge_filter_135degree_8width_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t *sao_offset_val, - int32_t height) -{ - const uint8_t *src_orig; - uint64_t dst_val0, dst_val1; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16i8 offset, sao_offset = LD_SB(sao_offset_val); - v16u8 cmp_minus10, diff_minus10, cmp_minus11, diff_minus11; - v16u8 src_minus10, src10, src_minus11, src11; - v16i8 src_zero0, src_zero1, dst0; - v8i16 offset_mask0, offset_mask1; - v16i8 zeros = { 0 }; - - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - src_orig = src - 1; - - /* load in advance */ - LD_UB2(src_orig - src_stride, src_stride, src_minus10, src_minus11); - LD_UB2(src_orig + src_stride, src_stride, src10, src11); - - for (height -= 2; height; height -= 2) { - src_orig += (src_stride << 1); - - SLDI_B2_SB(zeros, src_minus11, zeros, src10, 1, src_zero0, src_zero1); - SLDI_B2_UB(zeros, src_minus10, zeros, src_minus11, 2, src_minus10, src_minus11); - ILVR_B2_UB(src10, src_minus10, src11, src_minus11, src_minus10, - src_minus11); - ILVR_B2_SB(src_zero0, src_zero0, src_zero1, src_zero1, src_zero0, - src_zero1); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - src_minus10 = src10; - src_minus11 = src11; - - /* load in advance */ - LD_UB2(src_orig + src_stride, src_stride, src10, src11); - - dst_val0 = __msa_copy_u_d((v2i64) dst0, 0); - dst_val1 = __msa_copy_u_d((v2i64) dst0, 1); - - SD(dst_val0, dst); - dst += dst_stride; - SD(dst_val1, dst); - dst += dst_stride; - } - - SLDI_B2_SB(zeros, src_minus11, zeros, src10, 1, src_zero0, src_zero1); - SLDI_B2_UB(zeros, src_minus10, zeros, src_minus11, 2, src_minus10, src_minus11); - ILVR_B2_UB(src10, src_minus10, src11, src_minus11, src_minus10, - src_minus11); - ILVR_B2_SB(src_zero0, src_zero0, src_zero1, src_zero1, src_zero0, - src_zero1); - - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - 
diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - - offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2); - offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2); - - offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0); - dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0); - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset, - offset, offset); - - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - dst0 = __msa_adds_s_b(dst0, offset); - dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128); - - dst_val0 = __msa_copy_u_d((v2i64) dst0, 0); - dst_val1 = __msa_copy_u_d((v2i64) dst0, 1); - - SD(dst_val0, dst); - dst += dst_stride; - SD(dst_val1, dst); - dst += dst_stride; -} - -static void hevc_sao_edge_filter_135degree_16multiple_msa(uint8_t *dst, - int32_t dst_stride, - const uint8_t *src, - int32_t src_stride, - const int16_t * - sao_offset_val, - int32_t width, - int32_t height) -{ - const uint8_t *src_orig; - uint8_t *dst_orig; - int32_t v_cnt; - v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - v16u8 const1 = (v16u8) __msa_ldi_b(1); - v16u8 dst0, dst1, dst2, dst3; - v16u8 cmp_minus10, cmp_minus11, cmp_minus12, cmp_minus13, cmp_plus10; - v16u8 cmp_plus11, cmp_plus12, cmp_plus13, diff_minus10, diff_minus11; - v16u8 diff_minus12, diff_minus13, diff_plus10, diff_plus11, diff_plus12; - v16u8 diff_plus13, src10, src11, src12, src13, src_minus10, src_minus11; - v16u8 src_plus10, src_plus11, src_plus12, src_plus13; - v16i8 src_minus12, src_minus13, src_zero0, src_zero1, src_zero2, src_zero3; - v16i8 offset_mask0, offset_mask1, offset_mask2, offset_mask3, sao_offset; - - sao_offset = LD_SB(sao_offset_val); - sao_offset = __msa_pckev_b(sao_offset, sao_offset); - - for (; height; height -= 4) { - src_orig = src - 1; - dst_orig = dst; - - LD_UB4(src_orig, src_stride, src_minus11, src_plus10, src_plus11, - src_plus12); - - for (v_cnt = 0; v_cnt < width; v_cnt += 16) { - src_minus10 = LD_UB(src_orig + 2 - src_stride); - LD_UB4(src_orig + 16, src_stride, src10, src11, src12, src13); - src_plus13 = LD_UB(src_orig + (src_stride << 2)); - src_orig += 16; - - src_zero0 = __msa_sldi_b((v16i8) src10, (v16i8) src_minus11, 1); - cmp_minus10 = ((v16u8) src_zero0 == src_minus10); - cmp_plus10 = ((v16u8) src_zero0 == src_plus10); - - src_zero1 = __msa_sldi_b((v16i8) src11, (v16i8) src_plus10, 1); - src_minus11 = (v16u8) __msa_sldi_b((v16i8) src10, - (v16i8) src_minus11, 2); - cmp_minus11 = ((v16u8) src_zero1 == src_minus11); - cmp_plus11 = ((v16u8) src_zero1 == src_plus11); - - src_zero2 = __msa_sldi_b((v16i8) src12, (v16i8) src_plus11, 1); - src_minus12 = __msa_sldi_b((v16i8) src11, (v16i8) src_plus10, 2); - cmp_minus12 = ((v16u8) src_zero2 == (v16u8) src_minus12); - cmp_plus12 = ((v16u8) src_zero2 == src_plus12); - - src_zero3 = __msa_sldi_b((v16i8) src13, (v16i8) src_plus12, 1); - src_minus13 = __msa_sldi_b((v16i8) src12, (v16i8) src_plus11, 2); - cmp_minus13 = ((v16u8) src_zero3 == (v16u8) src_minus13); - cmp_plus13 = ((v16u8) src_zero3 == src_plus13); - - diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10); - diff_plus10 = __msa_nor_v(cmp_plus10, cmp_plus10); - 
diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11); - diff_plus11 = __msa_nor_v(cmp_plus11, cmp_plus11); - diff_minus12 = __msa_nor_v(cmp_minus12, cmp_minus12); - diff_plus12 = __msa_nor_v(cmp_plus12, cmp_plus12); - diff_minus13 = __msa_nor_v(cmp_minus13, cmp_minus13); - diff_plus13 = __msa_nor_v(cmp_plus13, cmp_plus13); - - cmp_minus10 = (src_minus10 < (v16u8) src_zero0); - cmp_plus10 = (src_plus10 < (v16u8) src_zero0); - cmp_minus11 = (src_minus11 < (v16u8) src_zero1); - cmp_plus11 = (src_plus11 < (v16u8) src_zero1); - cmp_minus12 = ((v16u8) src_minus12 < (v16u8) src_zero2); - cmp_plus12 = (src_plus12 < (v16u8) src_zero2); - cmp_minus13 = ((v16u8) src_minus13 < (v16u8) src_zero3); - cmp_plus13 = (src_plus13 < (v16u8) src_zero3); - - diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10); - diff_plus10 = __msa_bmnz_v(diff_plus10, const1, cmp_plus10); - diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11); - diff_plus11 = __msa_bmnz_v(diff_plus11, const1, cmp_plus11); - diff_minus12 = __msa_bmnz_v(diff_minus12, const1, cmp_minus12); - diff_plus12 = __msa_bmnz_v(diff_plus12, const1, cmp_plus12); - diff_minus13 = __msa_bmnz_v(diff_minus13, const1, cmp_minus13); - diff_plus13 = __msa_bmnz_v(diff_plus13, const1, cmp_plus13); - - offset_mask0 = 2 + (v16i8) diff_minus10 + (v16i8) diff_plus10; - offset_mask1 = 2 + (v16i8) diff_minus11 + (v16i8) diff_plus11; - offset_mask2 = 2 + (v16i8) diff_minus12 + (v16i8) diff_plus12; - offset_mask3 = 2 + (v16i8) diff_minus13 + (v16i8) diff_plus13; - - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask0, offset_mask0, offset_mask0, offset_mask0); - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask1, offset_mask1, offset_mask1, offset_mask1); - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask2, offset_mask2, offset_mask2, offset_mask2); - VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, - offset_mask3, offset_mask3, offset_mask3, offset_mask3); - - XORI_B4_128_SB(src_zero0, src_zero1, src_zero2, src_zero3); - - dst0 = (v16u8) __msa_adds_s_b((v16i8) src_zero0, offset_mask0); - dst1 = (v16u8) __msa_adds_s_b((v16i8) src_zero1, offset_mask1); - dst2 = (v16u8) __msa_adds_s_b((v16i8) src_zero2, offset_mask2); - dst3 = (v16u8) __msa_adds_s_b((v16i8) src_zero3, offset_mask3); - - XORI_B4_128_UB(dst0, dst1, dst2, dst3); - - src_minus11 = src10; - src_plus10 = src11; - src_plus11 = src12; - src_plus12 = src13; - - ST_UB4(dst0, dst1, dst2, dst3, dst_orig, dst_stride); - dst_orig += 16; - } - - src += (src_stride << 2); - dst += (dst_stride << 2); - } -} - -void ff_hevc_loop_filter_luma_h_8_msa(uint8_t *src, - ptrdiff_t src_stride, - int32_t beta, const int32_t *tc, - const uint8_t *no_p, const uint8_t *no_q) -{ - hevc_loopfilter_luma_hor_msa(src, src_stride, beta, tc, no_p, no_q); -} - -void ff_hevc_loop_filter_luma_v_8_msa(uint8_t *src, - ptrdiff_t src_stride, - int32_t beta, const int32_t *tc, - const uint8_t *no_p, const uint8_t *no_q) -{ - hevc_loopfilter_luma_ver_msa(src, src_stride, beta, tc, no_p, no_q); -} - -void ff_hevc_loop_filter_chroma_h_8_msa(uint8_t *src, - ptrdiff_t src_stride, - const int32_t *tc, const uint8_t *no_p, - const uint8_t *no_q) -{ - hevc_loopfilter_chroma_hor_msa(src, src_stride, tc, no_p, no_q); -} - -void ff_hevc_loop_filter_chroma_v_8_msa(uint8_t *src, - ptrdiff_t src_stride, - const int32_t *tc, const uint8_t *no_p, - const uint8_t *no_q) -{ - hevc_loopfilter_chroma_ver_msa(src, src_stride, tc, no_p, no_q); -} - -void ff_hevc_sao_band_filter_0_8_msa(uint8_t 
*dst, const uint8_t *src, - ptrdiff_t stride_dst, ptrdiff_t stride_src, - const int16_t *sao_offset_val, int sao_left_class, - int width, int height) -{ - if (width >> 4) { - hevc_sao_band_filter_16multiple_msa(dst, stride_dst, src, stride_src, - sao_left_class, sao_offset_val, - width - (width % 16), height); - dst += width - (width % 16); - src += width - (width % 16); - width %= 16; - } - - if (width >> 3) { - hevc_sao_band_filter_8width_msa(dst, stride_dst, src, stride_src, - sao_left_class, sao_offset_val, height); - dst += 8; - src += 8; - width %= 8; - } - - if (width) { - hevc_sao_band_filter_4width_msa(dst, stride_dst, src, stride_src, - sao_left_class, sao_offset_val, height); - } -} - -void ff_hevc_sao_edge_filter_8_msa(uint8_t *dst, const uint8_t *src, - ptrdiff_t stride_dst, - const int16_t *sao_offset_val, - int eo, int width, int height) -{ - ptrdiff_t stride_src = (2 * MAX_PB_SIZE + AV_INPUT_BUFFER_PADDING_SIZE) / sizeof(uint8_t); - - switch (eo) { - case 0: - if (width >> 4) { - hevc_sao_edge_filter_0degree_16multiple_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, - width - (width % 16), - height); - dst += width - (width % 16); - src += width - (width % 16); - width %= 16; - } - - if (width >> 3) { - hevc_sao_edge_filter_0degree_8width_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - dst += 8; - src += 8; - width %= 8; - } - - if (width) { - hevc_sao_edge_filter_0degree_4width_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - } - break; - - case 1: - if (width >> 4) { - hevc_sao_edge_filter_90degree_16multiple_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, - width - (width % 16), - height); - dst += width - (width % 16); - src += width - (width % 16); - width %= 16; - } - - if (width >> 3) { - hevc_sao_edge_filter_90degree_8width_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - dst += 8; - src += 8; - width %= 8; - } - - if (width) { - hevc_sao_edge_filter_90degree_4width_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - } - break; - - case 2: - if (width >> 4) { - hevc_sao_edge_filter_45degree_16multiple_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, - width - (width % 16), - height); - dst += width - (width % 16); - src += width - (width % 16); - width %= 16; - } - - if (width >> 3) { - hevc_sao_edge_filter_45degree_8width_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - dst += 8; - src += 8; - width %= 8; - } - - if (width) { - hevc_sao_edge_filter_45degree_4width_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - } - break; - - case 3: - if (width >> 4) { - hevc_sao_edge_filter_135degree_16multiple_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, - width - (width % 16), - height); - dst += width - (width % 16); - src += width - (width % 16); - width %= 16; - } - - if (width >> 3) { - hevc_sao_edge_filter_135degree_8width_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - dst += 8; - src += 8; - width %= 8; - } - - if (width) { - hevc_sao_edge_filter_135degree_4width_msa(dst, stride_dst, - src, stride_src, - sao_offset_val, height); - } - break; - } -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download the Inspiring Song Respond by Travis Greene and His Talented Collaborators.md b/spaces/congsaPfin/Manga-OCR/logs/Download the Inspiring Song Respond by Travis Greene and His Talented Collaborators.md deleted file mode 100644 index 
981f3def95e0dc0b2945813be87c2205acf2097c..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download the Inspiring Song Respond by Travis Greene and His Talented Collaborators.md +++ /dev/null @@ -1,133 +0,0 @@ - -

      Download Travis Greene Respond

      -

      If you are looking for a powerful and uplifting gospel song that will inspire you to respond to God's call, you should check out Respond by Travis Greene. This song is a beautiful expression of worship and surrender to God, featuring the amazing voices of Trinity Anderson, D'Nar Young, and Taylor Poole. In this article, we will tell you more about Travis Greene, the meaning and message of Respond, and how you can download it for your personal enjoyment.

      -

      download travis greene respond


      DOWNLOAD 🔗 https://urlca.com/2uO7bj



      -

      Who is Travis Greene?

      -

      Travis Greene is a renowned gospel singer, songwriter, pastor, and producer who has been making waves in the Christian music industry for over a decade. He is known for his passionate and authentic worship style, his dynamic and engaging live performances, and his powerful and uplifting songs that touch millions of lives around the world.

      -

      His background and achievements

      -

      Travis Greene was born in Delaware in 1984, but grew up in Georgia and South Carolina. He had a miraculous start to life, as he was pronounced dead after falling from a four-story building at the age of four. However, God had a different plan for him, and he was revived by his mother's prayer. He started singing at the age of five, and learned to play various instruments as a teenager. He also developed a strong faith in God, and felt called to ministry at an early age.

      -

      Travis Greene released his first album, The More, in 2007, followed by Stretching Out in 2010. However, he gained more recognition and acclaim with his third album, The Hill, which was released in 2015. This album featured his breakthrough hit, Intentional, which topped the Billboard Gospel Airplay chart for 16 weeks. It also earned him his first Grammy nomination for Best Gospel Album.

      -

      Travis Greene continued to soar higher with his fourth album, Crossover: Live from Music City, which was released in 2017. This album featured another smash hit, You Waited, which also reached number one on the Billboard Gospel Airplay chart. It also earned him his second Grammy nomination for Best Gospel Album, as well as 11 Stellar Awards nominations, winning seven of them.

      -

      Travis Greene has also collaborated with many other prominent artists in the gospel and secular music scenes, such as Kirk Franklin, Tasha Cobbs Leonard, Israel Houghton, Tim Godfrey, Tori Kelly, Steffany Gretzinger, and more. He has also performed at many prestigious events and venues, such as the White House, the Essence Festival, the Stellar Awards, the Dove Awards, the Trumpet Awards, and more.

      -

      His musical style and influences

      -

      Travis Greene has a unique musical style that blends contemporary gospel with elements of rock, soul, R&B, hip hop, pop, and Afrobeat. He is influenced by various artists from different genres, such as Fred Hammond, John P. Kee, Kirk Franklin, U2, Coldplay, Lauryn Hill, Bob Marley, Fela Kuti, and more. He is also inspired by his personal experiences with God's grace and power in his life.

      -

Travis Greene's songs are characterized by catchy melodies, uplifting lyrics, rich harmonies, energetic rhythms, and creative arrangements. He uses his music as a tool to share his testimony of God's goodness and faithfulness in his life, as well as to encourage others to trust God and respond to His call. He also uses his music as a platform to bridge cultural and racial gaps in the church and society. He believes that music is a universal language that can unite people from different backgrounds and beliefs under the banner of love and hope.

      -

      What is Respond?

      -

      Respond is one of the songs from Travis Greene's fifth and latest album, Broken Record, which was released in 2019. This album is a collection of songs that reflect Travis Greene's personal journey of brokenness and healing, as well as his desire to see God's kingdom come on earth as it is in heaven.

      -

      download travis greene respond mp3
      -download travis greene respond video
      -download travis greene respond lyrics
      -download travis greene respond album
      -download travis greene respond song
      -download travis greene respond audio
      -download travis greene respond live
      -download travis greene respond instrumental
      -download travis greene respond feat. trinity anderson
      -download travis greene respond free
      -download travis greene respond gospel music
      -download travis greene respond official video
      -download travis greene respond youtube
      -download travis greene respond music video
      -download travis greene respond broken record
      -download travis greene respond mp4
      -download travis greene respond worship song
      -download travis greene respond 2019
      -download travis greene respond online
      -download travis greene respond full song
      -download travis greene respond chord chart
      -download travis greene respond spotify
      -download travis greene respond itunes
      -download travis greene respond amazon music
      -download travis greene respond apple music
      -download travis greene respond soundcloud
      -download travis greene respond piano tutorial
      -download travis greene respond guitar chords
      -download travis greene respond karaoke
      -download travis greene respond remix
      -download travis greene respond cover
      -download travis greene respond acoustic version
      -download travis greene respond studio version
      -download travis greene respond radio edit
      -download travis greene respond performance track
      -download travis greene respond sheet music
      -download travis greene respond backing track
      -download travis greene respond loop kit
      -download travis greene respond multitrack stems
      -download travis greene respond midi file
      -download travis greene respond ringtone
      -download travis greene respond zip file
      -download travis greene respond mp3 juice
      -download travis greene respond naija gospel music blogspot com[^1^]
      -download travis greene respond gospelminds com[^2^]
      -download travis greene respond praisejamzblog com[^3^]

      -

      The meaning and message of the song

      -

      Respond is a song that expresses Travis Greene's heart of worship and surrender to God, as well as his invitation to others to join him in responding to God's call. The song is based on the biblical passage of Isaiah 6:8, where the prophet Isaiah hears the voice of the Lord asking, "Whom shall I send? And who will go for us?" And Isaiah responds, "Here am I. Send me."

      -

      The song begins with a chorus of voices singing, "We respond to You, we respond to You / We respond to You, we respond to You / We respond to You, we respond to You / We respond to You, we respond to You." This sets the tone and theme of the song, which is a declaration of readiness and willingness to follow God's will and purpose for our lives.

      -

      The song then continues with the verses, where Travis Greene sings about how God has been faithful and gracious to him, even when he was undeserving and unfaithful. He sings, "You've been good to me / You've been good to me / You have shown me mercy / You have shown me grace / And I don't deserve it / No I don't deserve it / But still You love me anyway / So I worship You forever / And I give my life away."

      -

      The song then returns to the chorus, where Travis Greene invites others to join him in responding to God's call. He sings, "So who will go for You? Who will go for You? / Who will go for You? Who will go for You? / We respond to You, we respond to You / We respond to You, we respond to You."

      -

      The song then transitions to the bridge, where Travis Greene leads a powerful and passionate prayer of surrender and commitment to God. He prays, "Lord here I am / Use me for Your glory / Lord here I am / Use me for Your plan / Lord here I am / Use me for Your purpose / Lord here I am / Here I am."

      -

      The song then ends with a repetition of the chorus and the bridge, creating a crescendo of worship and response that leaves the listener in awe of God's love and presence.

      -

      The features and collaborators of the song

      -

      Respond is not only a solo song by Travis Greene, but also a collaborative effort with three other talented singers: Trinity Anderson, D'Nar Young, and Taylor Poole. These three singers are part of Travis Greene's worship team and ministry, Forward City Church, where he serves as the lead pastor along with his wife Jackie.

      -

      Trinity Anderson is a young and gifted singer who has been singing since she was three years old. She has been featured on several songs by Travis Greene, such as See The Light, Good And Loved, Great Jehovah, and more. She has also released her own single, Glimpse, in 2020.

      -

      D'Nar Young is another talented singer who has been singing since he was five years old. He has also been featured on several songs by Travis Greene, such as All Things New, Won't Let Go, Broken Vessels, and more. He has also released his own single, You Are Good, in 2019.

      -

      Taylor Poole is a gifted singer and songwriter who has been singing since she was four years old. She has also been featured on several songs by Travis Greene, such as Be Still, Love Will Always Win, Broken Record, and more. She has also released her own single, Faithful, in 2019.

      -

      These three singers add their unique voices and harmonies to Respond, creating a beautiful blend of sound and spirit that enhances the message and impact of the song.

      -

      The album and video of the song

      -

Respond appears on Travis Greene's fifth and latest album, Broken Record, which was released in 2019 and reflects his personal journey of brokenness and healing. The album features 11 tracks, including Good And Loved, Won't Let Go, The Breaker, Great Jehovah, and more, with guest appearances by Steffany Gretzinger, DOE, Jekalyn Carr, and more.

      -

      The album received positive reviews from critics and fans alike, who praised Travis Greene's musical diversity, lyrical depth, and spiritual authenticity. The album also earned Travis Greene his third Grammy nomination for Best Gospel Album, as well as four Stellar Awards nominations, winning one of them.

      -

      The video of Respond was released in 2020, and it showcases Travis Greene and his collaborators performing the song live at Forward City Church. The video captures the atmosphere of worship and response that the song creates, as well as the joy and passion of the singers and the audience. The video also features some behind-the-scenes footage of the recording process and the rehearsal sessions. The video has over 10 million views on YouTube, and it has inspired many people to respond to God's call in their lives.

      -

      How to download Respond?

      -

      If you want to download Respond by Travis Greene for your personal enjoyment, you need to consider some factors before you do so. Downloading music can have some benefits and drawbacks, as well as some legal and ethical issues that you need to be aware of. You also need to choose the best platforms and sources to download the song from, to ensure the quality and safety of your download.

      -

      The benefits and drawbacks of downloading music

      -

      Downloading music can have some benefits, such as:

      -
        -
      • You can listen to your favorite songs anytime and anywhere, without relying on internet connection or streaming services.
      • -
      • You can save money by not paying for subscription fees or data charges.
      • -
      • You can create your own playlists and customize your listening experience.
      • -
      • You can support your favorite artists by buying their music or donating to their causes.
      • -
      -

      However, downloading music can also have some drawbacks, such as:

      -
        -
      • You can lose your downloaded music if your device gets damaged, lost, or stolen.
      • -
      • You can run out of storage space on your device if you download too many songs.
      • -
      • You can expose your device to viruses or malware if you download from untrusted sources.
      • -
      • You can miss out on new releases or updates if you don't check for them regularly.
      • -
      -

      The legal and ethical issues of downloading music

      -

      Downloading music can also have some legal and ethical issues that you need to be aware of. Downloading music is not illegal per se, as long as you do it from authorized sources and for personal use only. However, downloading music from unauthorized sources or for commercial use can be illegal and punishable by law. This is because downloading music from unauthorized sources or for commercial use violates the intellectual property rights of the artists and the producers who created the music.

      -

Downloading music from unauthorized sources or for commercial use can also be unethical, as it deprives the artists and producers of their rightful income and recognition, affecting their livelihoods and their ability to create more music in the future. It can also harm the quality and integrity of the music, since unauthorized copies may be distorted, corrupted, or incomplete.

      -

      The best platforms and sources to download Respond

      -

      If you want to download Respond by Travis Greene legally and ethically, you need to choose the best platforms and sources to download it from. There are many platforms and sources that offer legal and ethical downloads of music, such as:

Platform/Source | Description | Price
iTunes | A digital media store that offers downloads of songs, albums, videos, podcasts, audiobooks, and more. | $1.29 per song
Amazon Music | A digital music service that offers downloads of songs, albums, playlists, stations, podcasts, and more. | $1.29 per song
Google Play Music | A digital music service that offers downloads of songs, albums, playlists, stations, podcasts, and more. | $1.29 per song
Spotify | A digital music service that offers downloads of songs, albums, playlists, stations, podcasts, and more. | $9.99 per month for premium subscription
Pandora | A digital music service that offers downloads of songs, albums, playlists, stations, podcasts, and more. | $9.99 per month for premium subscription
YouTube Music | A digital music service that offers downloads of songs, albums, playlists, stations, videos, and more. | $9.99 per month for premium subscription
SoundCloud | A digital music platform that offers downloads of songs, albums, playlists, podcasts, and more. | Free or variable depending on the artist
Bandcamp | A digital music platform that offers downloads of songs, albums, merchandise, and more. | Free or variable depending on the artist
      -

These platforms and sources offer legal and ethical downloads of Respond by Travis Greene, as they are licensed by the artist and the producer to distribute the song. They also provide high-quality, safe downloads of the original and complete version, and they support the artist and the producer by paying them a fair share of the download revenue.

      -

      Conclusion

      -

In conclusion, Respond by Travis Greene is a powerful and uplifting gospel song that will inspire you to respond to God's call in your life. It expresses Travis Greene's heart of worship and surrender to God, as well as his invitation to others to join him in responding to God's call. It features the amazing voices of Trinity Anderson, D'Nar Young, and Taylor Poole, who are part of Travis Greene's worship team and ministry, and it appears on his fifth and latest album, Broken Record, a collection of songs that reflect his personal journey of brokenness and healing.

      -

If you want to download Respond by Travis Greene for your personal enjoyment, consider the benefits and drawbacks of downloading music, as well as the legal and ethical issues involved, and choose platforms and sources that offer legal and ethical downloads of the song, such as iTunes, Amazon Music, Google Play Music, Spotify, Pandora, YouTube Music, SoundCloud, Bandcamp, and more.

      -

      We hope that this article has helped you learn more about Travis Greene, the meaning and message of Respond, and how to download it legally and ethically. We also hope that you have enjoyed listening to this song and that it has blessed you in some way. If you have any questions or feedback, please feel free to leave them in the comments section below. Thank you for reading and have a wonderful day!

      -

      FAQs

      -

      Here are some frequently asked questions about Respond by Travis Greene:

      -
        -
      1. Who wrote Respond?
      2. -

        Respond was written by Travis Greene, along with Trinity Anderson, D'Nar Young, Taylor Poole, and Kenneth Leonard Jr.

        -
      3. When was Respond released?
      4. -

        Respond was released on October 11, 2019, as part of Travis Greene's fifth album, Broken Record.

        -
      5. What genre is Respond?
      6. -

        Respond is a contemporary gospel song that blends elements of rock, soul, R&B, hip hop, pop, and Afrobeat.

        -
      7. What is the key and tempo of Respond?
      8. -

        Respond is in the key of C major and has a tempo of 120 beats per minute.

        -
      9. Where can I watch the video of Respond?
      10. -

        You can watch the video of Respond on YouTube, where it has over 10 million views. You can also watch it on Travis Greene's official website, where you can find more information about him and his music.

        -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Empire Earth 2 The Top 10 World Map Downloads You Need to Try.md b/spaces/congsaPfin/Manga-OCR/logs/Empire Earth 2 The Top 10 World Map Downloads You Need to Try.md deleted file mode 100644 index 28a86882c26418ca3f55e314a69bba866bf18b42..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Empire Earth 2 The Top 10 World Map Downloads You Need to Try.md +++ /dev/null @@ -1,111 +0,0 @@ -
      -

      Empire Earth 2 World Map Download: A Guide for RTS Fans

      -

      Empire Earth 2 is a real-time strategy game that was released in 2005 by Mad Doc Software and Vivendi Universal Games. The game features 15 epochs, 14 different civilizations, and three playable campaigns that cover various historical periods and scenarios. Empire Earth 2 is known for its depth and complexity, as well as its innovative features such as the Picture-in-Picture window, the War Planner, and the Citizen Manager.

      -

One of the most popular features of Empire Earth 2 is the world map download, which is a mod that allows players to play on a huge map that represents the entire globe. The world map mod was created by fans of the game who wanted a more realistic and immersive experience of conquering the world. The mod can be downloaded from various websites, such as [15](https://www.moddb.com/games/empire-earth-2/mods), [16](https://ee.heavengames.com/downloads/lister.php?category=ee2_mods), and [3](https://www.moddb.com/games/empire-earth-2/downloads). In this article, we will show you how to download and install the world map mod for Empire Earth 2, discuss the benefits and drawbacks of playing on it, and share some tips and tricks to help you succeed.

      -

      empire earth 2 world map download


      Download 🗸 https://urlca.com/2uOejI



      -

      How to Download and Install the World Map Mod for Empire Earth 2

      -

      The world map mod for Empire Earth 2 is not an official patch or update, so you will need to install it manually. Here are the steps to follow:

      -
        -
      1. Make sure you have Empire Earth 2 and its expansion pack, The Art of Supremacy, installed on your computer. You will also need to have the latest version of Unofficial Patch 1.5, which is a fan-made patch that fixes many bugs and adds new features to the game. You can download it from [12](https://www.gamespot.com/games/empire-earth-ii/reviews/).
      2. -
      3. Choose a world map mod that you want to download. There are several versions available, each with different sizes, details, and features. Some of the most popular ones are:
          -
        • World map 1000x1000 by GHPL: This is a huge map that covers the entire globe with realistic terrain and resources. It can support up to 10 players and has no trees or animals in most areas. You can download it from [2](https://forum.ee2.eu/viewtopic.php?t=1806).
        • -
        • Realistic Earth by Ipomoea batatas: This is a smaller map that focuses on Europe, Africa, Asia, and Australia. It has more trees and animals than the previous one, as well as more historical accuracy and balance. You can download it from [16](https://ee.heavengames.com/downloads/lister.php?category=ee2_mods).
        • -
        • Empire Earth IV by RGV1: This is a complete overhaul of Empire Earth 2 that adds new units, buildings, technologies, civilizations, epochs, and gameplay modes. It also includes a world map that is based on Google Maps data and has realistic climate zones and weather effects. You can download it from [15](https://www.moddb.com/games/empire-earth-2/mods).
        • -
        -
      4. -
      5. Once you have downloaded your chosen world map mod, extract it to your Empire Earth 2 folder. This is usually located at C:\Program Files (x86)\Sierra\Empire Earth II or C:\Program Files (x86)\GOG.com\Empire Earth II Gold Edition.
      6. -
7. Open your Empire Earth 2 folder and find the file called EE2X_config.cfg. This is a configuration file that determines which files are loaded when you launch the game. Open it with Notepad or any other text editor and look for the line that says "moddir =". Change the value after the equal sign to the name of the folder that contains your world map mod. For example, if you downloaded the World map 1000x1000 by GHPL mod, change the line to "moddir = World map 1000x1000 by GHPL" (see the example after this list). Save and close the file.
      8. -
      9. Launch Empire Earth 2 and enjoy playing on the world map mod. You can access it from the Custom Scenario menu or the Random Map menu, depending on the mod you chose.
      10. -
      -
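      For reference, here is a minimal sketch of the edited line in EE2X_config.cfg after step 4. The folder name shown is only an example; use the exact name of whatever world map mod folder you extracted into your Empire Earth 2 directory:

      moddir = World map 1000x1000 by GHPL

      If the game does not pick up the mod, double-check that the folder name in this line matches the extracted folder exactly, including spaces and capitalization.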

      What are the Benefits and Drawbacks of Playing on the World Map Mod?

      -

      Playing on the world map mod for Empire Earth 2 can be a lot of fun and challenging, but it also has some advantages and disadvantages that you should be aware of. Here are some of them:

      -

      empire earth 2 custom world map
      -empire earth 2 world map mod
      -empire earth 2 real world map
      -empire earth 2 world map scenario
      -empire earth 2 world map editor
      -empire earth 2 world map multiplayer
      -empire earth 2 world map patch
      -empire earth 2 world map free download
      -empire earth 2 world map megafileupload
      -empire earth 2 world map forum.ee2.eu
      -empire earth 2 world map heaven games
      -empire earth 2 world map mod db
      -empire earth 2 world map korea institute of fusion energy
      -empire earth 2 world map battle for greece
      -empire earth 2 world map conquest of europe
      -empire earth 2 world map western europe
      -empire earth 2 world map untitled
      -empire earth 2 world map mission properties
      -empire earth 2 world map build and destroy
      -empire earth 2 world map number of players
      -empire earth 2 world map terrain
      -empire earth 2 world map continents
      -empire earth 2 world map islands
      -empire earth 2 world map oceans
      -empire earth 2 world map improvement
      -empire earth 2 world map epochs
      -empire earth 2 world map history
      -empire earth 2 world map conqueror
      -empire earth 2 world map empires
      -empire earth 2 world map expansion pack
      -empire earth 2 world map art of supremacy
      -empire earth 2 world map file name
      -empire earth 2 world map file format
      -empire earth 2 world map damaged file
      -empire earth 2 world map wrong file format
      -empire earth 2 world map compatible with aos
      -empire earth 2 world map resize option
      -empire earth 2 world map bugs and crashes
      -empire earth 2 world map tree limitation
      -empire earth 2 world map spawn border tree

Benefits | Drawbacks
You can experience a more realistic and immersive simulation of world history and geography. | You may encounter performance issues or crashes due to the large size and complexity of the map.
You can explore and conquer different regions and continents with different terrain, resources, and climate. | You may have difficulty finding and managing your units and buildings on such a huge map.
You can test your skills and strategies against other players or AI opponents on a global scale. | You may have to deal with more diplomacy, trade, and warfare options that can be overwhelming or confusing.
You can customize and modify the map to suit your preferences and play style. | You may have compatibility issues or conflicts with other mods or patches that you have installed.
      -

      What are Some Tips and Tricks for Playing on the World Map Mod?

      -

      If you want to have a better and more enjoyable experience playing on the world map mod for Empire Earth 2, here are some tips and tricks that you can follow:

      -
        -
      • Use the Picture-in-Picture window to keep an eye on different areas of the map. You can also use it to quickly jump to a location by double-clicking on it.
      • -
      • Use the War Planner to plan and execute your attacks or defenses. You can also use it to coordinate with your allies or spy on your enemies.
      • -
      • Use the Citizen Manager to automate your resource gathering and building construction. You can also use it to assign priorities and tasks to your citizens.
      • -
      • Use hotkeys and shortcuts to speed up your actions and commands. You can also customize them in the Options menu.
      • -
      • Use scouts, spies, satellites, and aircrafts to explore and reveal the map. You can also use them to harass or sabotage your enemies.
      • -
      • Use terrain, weather, and time of day to your advantage. You can also use them to hide or ambush your enemies.
      • -
      • Use diplomacy, trade, and alliances to gain allies or enemies. You can also use them to gain resources, information, or support.
      • -
      • Use different units, buildings, technologies, and civilizations to suit your strategy and epoch. You can also use them to counter or surprise your enemies.
      • -
      -

      Conclusion

      -

      The world map mod for Empire Earth 2 is a great way to enhance your gaming experience and challenge yourself in a realistic and immersive simulation of world history and geography. However, it also has some drawbacks and difficulties that you should be prepared for. If you follow our guide on how to download and install the world map mod, as well as our tips and tricks on how to play on it, you will have a lot of fun and satisfaction conquering the world in Empire Earth 2.

      -

      FAQs

      -

      Here are some frequently asked questions about the world map mod for Empire Earth 2:

      -

      Q: Is the world map mod compatible with multiplayer?

      -

      A: Yes, you can play on the world map mod with other players online or via LAN. However, you will need to make sure that all players have the same version of Empire Earth 2, Unofficial Patch 1.5, and world map mod installed. Otherwise, you may encounter errors or desyncs during gameplay.

      -

      Q: Is the world map mod compatible with other mods?

      -

      A: It depends on the mod. Some mods may work fine with the world map mod, while others may cause conflicts or crashes. You will need to check the compatibility of each mod before installing it. If you encounter any problems, you may need to uninstall or disable some mods. You can also use the moddir option in the EE2X_config.cfg file to switch between different mods easily.

      -

      Q: Is the world map mod compatible with different resolutions?

      -

      A: Yes, you can play on the world map mod with different resolutions, such as 1920x1080, 2560x1440, or 3840x2160. However, you may need to adjust some settings in the Options menu, such as the UI scale, the camera zoom, and the graphics quality, to optimize your gameplay and performance.

      -

      Q: Is the world map mod updated regularly?

      -

      A: It depends on the mod. Some mods are updated frequently by their creators, while others are discontinued or abandoned. You can check the latest updates and news of each mod on their respective websites or forums. You can also contact the mod creators directly if you have any questions or feedback.

      -

      Q: Is the world map mod safe to download and install?

      -

      A: Generally, yes. The world map mod for Empire Earth 2 is created by fans of the game who want to share their work and passion with other players. However, you should always be careful when downloading and installing any files from the internet, as they may contain viruses or malware that can harm your computer. You should always scan the files with a reliable antivirus software before opening them. You should also backup your original game files before installing any mods, in case something goes wrong or you want to revert to the vanilla version.

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/First Refuge Z Mod APK - A Thrilling Zombie Survival Game.md b/spaces/congsaPfin/Manga-OCR/logs/First Refuge Z Mod APK - A Thrilling Zombie Survival Game.md deleted file mode 100644 index ce98e1788b27971fdc978beeacd2121dcae6ad8f..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/First Refuge Z Mod APK - A Thrilling Zombie Survival Game.md +++ /dev/null @@ -1,98 +0,0 @@ -
      -

      Download First Refuge: Z Mod APK - A Survival Game with Unlimited Money

      -

      Do you love survival games where you have to build your shelter, recruit survivors, fight zombies, and explore the wasteland? If yes, then you should try First Refuge: Z, a strategy game developed by 37GAMES. In this game, you have to survive in a post-apocalyptic world where zombies and raiders are everywhere. You have to manage your resources, upgrade your facilities, train your troops, and defend your base from enemies. You can also join alliances and cooperate with other players to survive together.

      -

      However, if you want to enjoy the game without any limitations, you should download First Refuge: Z mod apk, a modified version of the game that gives you unlimited money, unlimited shopping, increased movement speed, and no ads. With this mod apk, you can build your shelter faster, recruit more survivors, buy more weapons and items, and have more fun in the game. In this article, we will tell you what is First Refuge: Z, why you should download First Refuge: Z mod apk, and how to download and install it on your device.

      -


      What is First Refuge: Z?

      -

      First Refuge: Z is a strategy game that combines base building, resource management, zombie fighting, and exploration. The game is set in a world where a virus outbreak has turned most people into zombies. You are one of the few survivors who have to find a safe place to live. You have to build your shelter underground, where you can grow crops, produce energy, store water, and research technologies. You also have to recruit other survivors who have different skills and abilities. You can assign them to different tasks such as farming, mining, crafting, fighting, etc.

      -

      But building your shelter is not enough. You also have to protect it from zombies and raiders who will try to attack you and loot your resources. You have to train your troops and equip them with weapons and armor. You can also use traps and turrets to defend your base. You can also go out and explore the wasteland, where you can find more resources, items, secrets, and dangers. You can also join alliances and chat with other players online. You can help each other by sending resources, troops, or reinforcements.

      -

      Features of First Refuge: Z

      -

      First Refuge: Z has many features that make it an exciting and addictive game. Here are some of them:

      -

      Build your shelter

      -

      You can build your shelter underground by digging tunnels and rooms. You can customize your shelter by placing different facilities such as farms, generators, warehouses, labs, workshops, etc. You can also upgrade your facilities to increase their efficiency and capacity. You have to balance your production and consumption of resources such as food, water, electricity, metal, etc.

      -

      Recruit survivors

      -

      You can recruit survivors who have different skills and abilities. You can assign them to different tasks such as farming, mining, crafting, fighting, etc. You can also level up your survivors and improve their attributes such as health, attack, defense, speed, etc. You can also equip them with weapons and armor that you can craft or find in the wasteland.

      -


      Fight zombies and raiders

      -

You have to fight zombies and raiders who will try to attack you and loot your resources. Train your troops, equip them with weapons and armor, and use traps and turrets to defend your base.

      Explore the wasteland

      -

      You can also explore the wasteland, where you can find more resources, items, secrets, and dangers. You can use vehicles to travel faster and carry more loot. You can also encounter different events and scenarios that will test your skills and decisions. You can also find other survivors who can join you or fight you. You can also discover hidden locations and secrets that will reveal more about the world and the virus.

      -

      Why download First Refuge: Z mod apk?

      -

      First Refuge: Z is a fun and challenging game, but it also has some limitations and drawbacks. For example, you have to wait for a long time to build or upgrade your facilities, recruit or train your survivors, or explore the wasteland. You also have to spend real money to buy more resources, items, or premium features. You also have to watch ads to get some rewards or bonuses. And you also have to deal with the slow movement speed of your troops and vehicles.

      -

      But what if you could enjoy the game without any of these limitations and drawbacks? What if you could have unlimited money, unlimited shopping, increased movement speed, and no ads? Well, you can do that by downloading First Refuge: Z mod apk, a modified version of the game that gives you all these benefits and more. With this mod apk, you can build your shelter faster, recruit more survivors, buy more weapons and items, and have more fun in the game.

      -

      Benefits of First Refuge: Z mod apk

      -

      First Refuge: Z mod apk has many benefits that make it a better version of the game. Here are some of them:

      -

      Unlimited money

      -

      With First Refuge: Z mod apk, you will have unlimited money in the game. Money is the main currency in the game that you can use to buy resources, items, weapons, armor, vehicles, etc. You can also use money to speed up your building or upgrading process, recruit or train your survivors, or explore the wasteland faster. With unlimited money, you can buy anything you want without worrying about running out of money.

      -

      Unlimited shopping

      -

      With First Refuge: Z mod apk, you will have unlimited shopping in the game. Shopping is the feature that allows you to buy resources, items, weapons, armor, vehicles, etc. from the shop. You can also buy premium features such as VIP membership, special packages, or exclusive offers. With unlimited shopping, you can buy anything you want without worrying about the price or the availability.

      -

      Increased movement speed

      -

      With First Refuge: Z mod apk, you will have increased movement speed in the game. Movement speed is the feature that determines how fast your troops and vehicles can move in the game. The higher the movement speed, the faster you can reach your destination or escape from danger. With increased movement speed, you can save time and energy in the game.

      -

      No ads

      -

      With First Refuge: Z mod apk, you will have no ads in the game. Ads are the annoying pop-ups that appear in the game from time to time. They interrupt your gameplay and force you to watch them to get some rewards or bonuses. They also consume your data and battery life. With no ads, you can enjoy the game without any distractions or interruptions.

      -

      How to download and install First Refuge: Z mod apk?

      -

      If you are interested in downloading and installing First Refuge: Z mod apk on your device, you can follow these simple steps:

      -

      Steps to download and install First Refuge: Z mod apk

      -
1. Click on this link to download the First Refuge: Z mod apk file to your device.
2. Go to your device settings and enable unknown sources to allow installation of apps from unknown sources.
3. Locate the downloaded file in your file manager and tap on it to start the installation process.
4. Follow the instructions on the screen to complete the installation process.
5. Launch the game and enjoy First Refuge: Z mod apk with unlimited money, unlimited shopping, increased movement speed, and no ads.
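If you prefer to work from a computer, the same installation can also be done over USB with adb instead of tapping through the file manager. The snippet below is only a sketch of that alternative: it assumes the Android platform-tools are installed and on PATH, USB debugging is enabled on your phone, and the APK file name is hypothetical.

```python
# Minimal sketch of a command-line alternative: install the APK with adb rather than
# the on-device file manager. Assumptions: Android platform-tools on PATH, USB
# debugging enabled, and a hypothetical file name (use the file you actually downloaded).
import subprocess

APK = "first-refuge-z-mod.apk"  # hypothetical file name

def sideload(apk_path: str) -> None:
    subprocess.run(["adb", "devices"], check=True)                   # confirm the phone is listed
    subprocess.run(["adb", "install", "-r", apk_path], check=True)   # -r replaces an existing install

sideload(APK)
```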

      Conclusion

      -

      First Refuge: Z is a strategy game that combines base building, resource management, zombie fighting, and exploration. It is a fun and challenging game that will test your skills and decisions in a post-apocalyptic world. However, if you want to enjoy the game without any limitations or drawbacks, you should download First Refuge: Z mod apk, a modified version of the game that gives you unlimited money, unlimited shopping, increased movement speed, and no ads. With this mod apk, you can build your shelter faster, recruit more survivors, buy more weapons and items, and have more fun in the game.

      -

      FAQs

      -

      Here are some frequently asked questions about First Refuge: Z mod apk:

      -
Q: Is First Refuge: Z mod apk safe to download and install?

A: Yes, First Refuge: Z mod apk is safe to download and install. It does not contain any viruses, malware, or spyware. It also does not require any root or jailbreak to run. However, you should always download it from a trusted source and scan it with an antivirus before installing it.

Q: Will First Refuge: Z mod apk work on my device?

A: First Refuge: Z mod apk should work on most Android devices that have Android 4.4 or higher. However, some devices may not be compatible or may experience some issues. If you encounter any problems, you can try to clear the cache, restart the device, or reinstall the game.

Q: Will First Refuge: Z mod apk affect my game progress or account?

A: No, First Refuge: Z mod apk will not affect your game progress or account. You can play the game normally and save your progress online. You can also log in with your Facebook or Google account and sync your data across different devices. However, you should be careful not to use the mod apk in online modes or events, as you may get banned by the game developers.

Q: Can I update First Refuge: Z mod apk?

A: Yes, you can update First Refuge: Z mod apk whenever there is a new version available. However, you should always back up your data before updating, as you may lose your progress or settings. You should also download the latest version of the mod apk from the same source as before.

Q: Can I play First Refuge: Z mod apk offline?

A: Yes, you can play First Refuge: Z mod apk offline. You can enjoy the game without any internet connection. However, some features such as online chat, alliances, or events may not be available offline. You also need to connect to the internet once in a while to save your progress online.
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Goat Simulator The Game That Lets You Live Your Dream of Being a Goat.md b/spaces/congsaPfin/Manga-OCR/logs/Goat Simulator The Game That Lets You Live Your Dream of Being a Goat.md deleted file mode 100644 index 4f3dedfa8565e8cfd393b3134c7e54e679905411..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Goat Simulator The Game That Lets You Live Your Dream of Being a Goat.md +++ /dev/null @@ -1,258 +0,0 @@ -
      -

      Download the Goat Simulator: A Guide to the Craziest Game Ever

      -

      Have you ever wondered what it would be like to be a goat? To roam around the city, headbutt people and objects, lick everything, and cause as much destruction as possible? Well, wonder no more, because Goat Simulator is the game for you. Goat Simulator is a hilarious and absurd game that lets you unleash your inner goat and have fun in a sandbox environment. In this article, we will tell you everything you need to know about Goat Simulator, how to download it, and how to play it.

      -

      What is Goat Simulator?

      -

      Goat Simulator is a game that simulates the life of a goat, but not in a realistic or educational way. Instead, it is a parody of other simulation games, such as Farming Simulator or Flight Simulator, that exaggerates the physics and mechanics of being a goat. The game is full of bugs, glitches, and nonsense, but that's what makes it so entertaining and hilarious. You can do anything you want as a goat, from jumping on trampolines, riding bikes, flying jetpacks, joining cults, fighting zombies, robbing banks, and even going to space.

      -


      The history and features of Goat Simulator

      -

      Goat Simulator was developed by Coffee Stain Studios, a Swedish indie game studio. It was originally created as a joke for an internal game jam in 2014, but after receiving positive feedback from YouTube videos and social media, the developers decided to release it as a full game on Steam. Since then, Goat Simulator has become a cult hit, selling millions of copies and receiving several updates and expansions that add new maps, modes, characters, and features. Some of the features of Goat Simulator are:

      -
- You can be a goat
- You can get points for wrecking stuff - brag to your friends that you're the alpha goat
- You can customize your goat with different outfits, accessories, mutations, and abilities
- You can explore different environments, such as a suburban town, an amusement park, a medieval castle, a desert island, a sci-fi space station, and more
- You can interact with various NPCs, animals, vehicles, objects, and events
- You can play with or against other players online or locally
- You can create your own goats, levels, missions, game modes, and more with Steam Workshop support

      The gameplay and objectives of Goat Simulator

      -

      The gameplay of Goat Simulator is simple: you control a goat and you can do whatever you want. There are no rules, no goals, no missions, no storylines, no consequences. Just pure chaos and fun. You can explore the world at your own pace, or you can try to complete some of the achievements and quests that are scattered around the map. Some of these include:

      -
- Performing stunts and tricks with your goat
- Collecting golden goat statues that unlock new goats
- Finding hidden trophies that give you special powers
- Destroying specific objects or buildings
- Messing with other characters or animals
- Participating in mini-games or events
- Discovering secrets or easter eggs

      The pros and cons of Goat Simulator

      -

      Goat Simulator is not a game for everyone. It is a game that embraces its flaws and absurdity, and that can be either appealing or annoying depending on your taste and mood. Here are some of the pros and cons of Goat Simulator that you should consider before downloading it:

| Pros | Cons |
|------|------|
| It is hilarious and unpredictable | It is buggy and glitchy |
| It is creative and original | It is repetitive and shallow |
| It is easy and relaxing | It is challenging and frustrating |
| It is cheap and accessible | It is outdated and unsupported |

      In summary, Goat Simulator is a game that you will either love or hate, depending on what you expect from it. If you are looking for a serious, realistic, or polished game, then Goat Simulator is not for you. But if you are looking for a silly, ridiculous, or fun game, then Goat Simulator might be just what you need.

      -

      How to download Goat Simulator?

      -

      If you are interested in trying Goat Simulator for yourself, you will be happy to know that it is available on various platforms and devices. You can download Goat Simulator on your PC, Mac, Linux, Android, iOS, Xbox One, Xbox 360, PlayStation 4, PlayStation 3, or Nintendo Switch. However, the game might have different features, updates, or prices depending on the platform or device you choose. Here are the steps to download Goat Simulator on two of the most popular platforms: Steam and Google Play.

      -


      The platforms and devices that support Goat Simulator

      -

      Goat Simulator was first released on Steam for Windows, Mac, and Linux in 2014. Since then, it has been ported to other platforms and devices by different developers. The following table shows the platforms and devices that support Goat Simulator, as well as the release date, developer, price, and rating of the game on each platform or device.

| Platform/Device | Release Date | Developer | Price | Rating |
|---|---|---|---|---|
| Steam (Windows/Mac/Linux) | April 1, 2014 | Coffee Stain Studios | $9.99 USD | Mixed (7/10) |
| Android/iOS | September 17, 2014 | Coffee Stain Publishing | $4.99 USD | Positive (4.3/5) |
| Xbox One/Xbox 360 | April 17, 2015 | Coffee Stain Studios / Double Eleven Limited | $9.99 USD | Mixed (3.5/5) / Mixed (3/5) |
| PlayStation 4/PlayStation 3 | August 11, 2015 | Coffee Stain Studios / Double Eleven Limited | $9.99 USD | Mixed (3.5/5) / Mixed (3/5) |
| Nintendo Switch | January 23, 2019 | Coffee Stain Studios / Double Eleven Limited | $29.99 USD | Mixed (3/5) |

The steps to download Goat Simulator on Steam

      If you want to download Goat Simulator on your PC, Mac, or Linux, you will need to use Steam, a digital distribution platform that allows you to buy and play games online. To download Goat Simulator on Steam, you will need to follow these steps:

      1. Create a Steam account or log in to your existing account.
      2. Download and install the Steam client on your computer.
      3. Launch the Steam client and search for Goat Simulator in the store.
      4. Add Goat Simulator to your cart and proceed to checkout.
      5. Select your payment method and confirm your purchase.
      6. Go to your library and click on Goat Simulator to start downloading it.
      7. Wait for the download to finish and then click on Goat Simulator to play it.

      The steps to download Goat Simulator on Google Play

      -

      If you want to download Goat Simulator on your Android device, you will need to use Google Play, a digital distribution platform that allows you to buy and download apps and games on your device. To download Goat Simulator on Google Play, you will need to follow these steps:

      -
1. Open the Google Play app on your device or go to the Google Play website on your browser.
2. Search for Goat Simulator in the app or website.
3. Tap on Goat Simulator and then tap on the green Install button.
4. Select your payment method and confirm your purchase.
5. Wait for the download to finish and then tap on Goat Simulator to play it.

      How to play Goat Simulator?

      -

      Now that you have downloaded Goat Simulator, you might be wondering how to play it. Well, there is no right or wrong way to play Goat Simulator, as the game is meant to be a sandbox where you can do whatever you want. However, there are some basic controls and commands that you should know, as well as some tips and tricks that can help you cause more chaos as a goat. Here are some of them:

      -

      The controls and commands of Goat Simulator

      -

      The controls and commands of Goat Simulator vary depending on the platform or device you are using. However, the general functions of each control or command are similar. Here is a table that shows the default controls and commands of Goat Simulator for PC, Android, Xbox One, PlayStation 4, and Nintendo Switch:

| Function | PC | Android | Xbox One | PlayStation 4 | Nintendo Switch |
|---|---|---|---|---|---|
| Move | WASD keys | Left joystick | Left stick | Left stick | Left stick |
| Jump | Spacebar | A button | A button | X button | B button |
| Sprint | Shift key | B button | B button | O button | A button |
| Baa / Slow motion | R key / Tab key | C button / D button | X button / Y button | Square button / Triangle button | X button / Y button |

The remaining actions (licking and attaching objects, using and throwing items, the special ability, ragdoll mode, the pause, inventory, mutator and camera menus, and the various Steam controller options) are mapped to the other keys, buttons, triggers, and touch gestures on each platform; the full list is too long to reproduce here, so check the in-game controls menu for your device.

      Of course, you can also customize the controls and commands of Goat Simulator to suit your preferences and needs. You can do this by going to the settings menu and changing the key bindings, sensitivity, inversion, vibration, and other options.

      -

      The tips and tricks to cause more chaos as a goat

      -

      As we mentioned before, there is no specific goal or objective in Goat Simulator, other than to have fun and cause chaos. However, if you want to maximize your enjoyment and destruction as a goat, here are some tips and tricks that you can try:

      -
- Use your tongue to lick and drag objects, people, or animals. You can use them as weapons, projectiles, shields, or vehicles.
- Use your headbutt to knock over or break things. You can also use it to launch yourself into the air or bounce off walls.
- Use your special ability to activate your goat's unique power. Depending on the goat you choose, you can do things like spit water, shoot fireballs, summon goats, explode, fly, or turn into a demon.
- Use your ragdoll mode to flop around and slide on surfaces. You can also use it to avoid damage or get out of sticky situations.
- Use your slow motion mode to make everything look more epic and dramatic. You can also use it to aim better or dodge faster.
- Use your inventory to store and use items that you find or collect. You can use them to enhance your goat's abilities, change your appearance, or interact with the environment.
- Use your mutators to modify your goat's characteristics, behavior, or appearance. You can mix and match different mutators to create your own custom goat.
- Use your score multiplier to increase the points you get for causing mayhem. You can increase your multiplier by performing combos, stunts, tricks, or achievements.
- Use your environment to your advantage. You can find and use various objects, vehicles, animals, or events that can help you cause more chaos or have more fun.

      The secrets and easter eggs of Goat Simulator

      -

      Goat Simulator is a game that is full of secrets and easter eggs that reference other games, movies, shows, memes, or pop culture. Some of these are obvious and easy to find, while others are hidden and hard to discover. Here are some of the secrets and easter eggs that you can look for in Goat Simulator:

      -
- The Flappy Goat mini-game: You can find a TV in the Coffee Stain Studios building that lets you play a parody of Flappy Bird with a goat head.
- The Minecraft block: You can find a block of dirt with grass on top in the construction site that looks like a block from Minecraft.
- The Teenage Mutant Ninja Turtles: You can find four turtles wearing colored masks and eating pizza in a sewer near the gas station.
- The Hitchhiker's Guide to the Galaxy: You can find a towel and a sign that says "Don't Panic" in the crashed UFO near the wind turbines.
- The Deadmau5 concert: You can find a concert stage with a DJ wearing a mouse head near the amusement park.
- The Goat of the Hill: You can find a throne made of goats on top of a hill near the farm.
- The Goatborn: You can find a helmet with horns and a sword near the goat castle that lets you shout like the Dragonborn from Skyrim.
- The Goat Simulator MMO Simulator: You can find a portal to a fantasy world with other players, classes, quests, and bosses near the power lines.
- The GoatZ: You can find a portal to a zombie apocalypse world with survival, crafting, and infection mechanics near the graveyard.
- The Payday: You can find a portal to a heist world with masks, weapons, and missions near the train station.
- The Waste of Space: You can find a portal to a space world with lasers, aliens, and planets near the museum.

      Conclusion

      -

      Goat Simulator is a game that defies logic, physics, and common sense. It is a game that lets you be a goat and do whatever you want. It is a game that is full of bugs, glitches, and nonsense, but that's what makes it so entertaining and hilarious. It is a game that you can download on various platforms and devices, and play with or against other players online or locally. It is a game that you can customize, modify, and create with your own imagination. It is a game that you will either love or hate, depending on your taste and mood.

      -

      If you are looking for a serious, realistic, or polished game, then Goat Simulator is not for you. But if you are looking for a silly, ridiculous, or fun game, then Goat Simulator might be just what you need. So what are you waiting for? Download Goat Simulator today and unleash your inner goat!

      -

      Summary of the main points

      -
- Goat Simulator is a parody of other simulation games that lets you be a goat and cause chaos in a sandbox environment.
- Goat Simulator has various features, modes, maps, characters, and items that make it creative and original.
- Goat Simulator has no rules, goals, missions, or consequences. You can do anything you want as a goat.
- Goat Simulator is available on various platforms and devices. You can download it on Steam or Google Play with different steps.
- Goat Simulator has different controls and commands depending on the platform or device you use. You can also customize them to suit your preferences.
- Goat Simulator has some tips and tricks that can help you cause more chaos as a goat. You can use your tongue, headbutt, special ability, ragdoll mode, slow motion mode, inventory, mutators, score multiplier, and environment to your advantage.
- Goat Simulator has some secrets and easter eggs that reference other games, movies, shows, memes, or pop culture. You can find them by exploring the world or completing certain actions.

      Call to action and recommendation

      -

      If you enjoyed this article and want to learn more about Goat Simulator or other games like it, please visit our website for more information and reviews. You can also subscribe to our newsletter to get the latest updates and offers on games and gaming accessories. And if you have any questions or feedback about Goat Simulator or this article, please leave us a comment below or contact us via email or social media. We would love to hear from you!

      -

      Frequently Asked Questions

      -

      Here are some of the most frequently asked questions about Goat Simulator:

      -

      Q: Is Goat Simulator suitable for children?

      -

      A: Goat Simulator is rated T for Teen by the ESRB (Entertainment Software Rating Board) for crude humor, violence, and drug reference. It contains some scenes and content that may not be appropriate for younger audiences, such as blood, gore, profanity, alcohol, drugs, and sexual innuendo. Therefore, parental discretion is advised when playing or watching Goat Simulator with children.

      -

      Q: How long is Goat Simulator?

      -

      A: Goat Simulator does not have a fixed length or duration, as it is a sandbox game that does not have a linear storyline or progression. You can play Goat Simulator for as long or as short as you want, depending on your interest and curiosity. However, if you want to complete all the achievements and quests in the game, you will need to spend several hours exploring and experimenting with the game.

      -

      Q: How can I get more goats in Goat Simulator?

      -

      A: Goat Simulator has a variety of goats that you can unlock and play with, each with their own appearance, abilities, and personality. You can unlock more goats by collecting golden goat statues that are hidden throughout the map, or by completing certain achievements or quests that require you to do specific actions or tasks. Some of the goats that you can unlock are:

      -
- Angel Goat: You get this goat by not causing any damage for 5 minutes.
- Devil Goat: You get this goat by bringing 5 people to the pentagram near the power lines.
- Queen Goat: You get this goat by sitting on the throne in the goat castle.
- Ripped Goat: You get this goat by working out at the gym near the hotel.
- Robot Goat: You get this goat by collecting all 6 batteries in the power plant.
- Space Goat: You get this goat by finding a special beacon in the field near the wind turbines.

      Q: How can I get more maps in Goat Simulator?

      -

      A: Goat Simulator has several maps that you can explore and cause chaos in, each with their own theme, environment, and secrets. You can access more maps by downloading the updates and expansions that are available for the game, or by creating your own maps with Steam Workshop support. Some of the maps that you can access are:

      -
- Goatville: This is the original map of Goat Simulator, where you start as a goat in a suburban town.
- Goat City Bay: This is an update that adds a new map with a coastal city, an amusement park, and a skate park.
- Goat MMO Simulator: This is an expansion that adds a new map with a fantasy world, where you can choose from different classes and races of goats.
- GoatZ: This is an expansion that adds a new map with a zombie apocalypse world, where you have to survive, craft, and infect others.
- Payday: This is an expansion that adds a new map with a heist world, where you can wear masks, use weapons, and rob banks.
- Waste of Space: This is an expansion that adds a new map with a space world, where you can use lasers, fight aliens, and visit planets.

      Q: How can I play Goat Simulator with friends?

      -

      A: Goat Simulator has a multiplayer mode that allows you to play with or against other players online or locally. You can join or host a server with up to 16 players online, or you can play split-screen with up to 4 players locally. You can also chat with other players using text or voice chat. To play multiplayer mode, you will need to select it from the main menu and choose your preferred options.

      -

      Q: Where can I find more information about Goat Simulator?

      -

      A: If you want to find more information about Goat Simulator, such as news, updates, guides, tips, tricks, secrets, easter eggs, reviews, or feedback, you can visit the following sources:

      -
- The official website of Goat Simulator: [https://www.goat-simulator.com/]
- The official wiki of Goat Simulator: [https://goat-simulator.fandom.com/wiki/Goat_Simulator_Wiki]
- The official Steam page of Goat Simulator: [https://store.steampowered.com/app/265930/Goat_Simulator/]
- The official Google Play page of Goat Simulator: [https://play.google.com/store/apps/details?id=com.coffeestainstudios.goatsimulator&hl=en_US&gl=US]
- The official YouTube channel of Coffee Stain Studios: [https://www.youtube.com/user/CoffeeStainStudios]
- The official Twitter account of Coffee Stain Studios: [https://twitter.com/Coffee_Stain]
- The official Facebook page of Coffee Stain Studios: [https://www.facebook.com/CoffeeStainStudios/]
- The official Reddit community of Goat Simulator: [https://www.reddit.com/r/GoatSimulator/]

      We hope that this article has helped you learn more about Goat Simulator, how to download it, and how to play it. If you have any questions or feedback about Goat Simulator or this article, please leave us a comment below or contact us via email or social media. We would love to hear from you!

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Punball MOD APK How to Launch Magic Balls and Crush Monsters.md b/spaces/congsaPfin/Manga-OCR/logs/Punball MOD APK How to Launch Magic Balls and Crush Monsters.md deleted file mode 100644 index 361248c61f394682c57853403a8b3a3d6b0d668a..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Punball MOD APK How to Launch Magic Balls and Crush Monsters.md +++ /dev/null @@ -1,88 +0,0 @@ -
      -

      Download Punball Mod APK - A Fun and Magical Game for Android

      -

      If you are looking for a fun and magical game to play on your Android device, you should check out Punball. Punball is a casual arcade game that combines pinball and magic. You play as a female wizard who has to protect her planet from evil invaders by shooting magical balls at them. The game has simple controls, colorful graphics, and addictive gameplay.

      -

      What is Punball?

      -

      Punball is a game developed by Bandishare, a Korean studio that specializes in casual games. The game was released in 2023 and has received positive reviews from players and critics alike. The game has over 10 million downloads on Google Play Store and has a rating of 4.5 out of 5 stars.

      -


      The game has a simple premise: you have to defend your planet Barren from evil creatures that want to destroy it. You do this by shooting magical balls at them using your finger. You can swipe on the screen to aim and release to shoot. You can also use different skills and items to enhance your power and performance.

      -

      The game has various modes and levels to keep you entertained. You can play in story mode, where you have to complete missions and defeat bosses. You can also play in endless mode, where you have to survive as long as possible against waves of enemies. You can also play in multiplayer mode, where you can compete with other players online or offline.

      -

      Why Download Punball Mod APK?

      -

      Punball is a free-to-play game that you can download from Google Play Store or other sources. However, if you want to enjoy the game to its fullest potential, you might want to download Punball Mod APK instead.

      -

      Punball Mod APK is a modified version of the game that gives you unlimited money, gems, skills, and menu options. This means that you can unlock all the features of the game without spending any real money or waiting for long hours. You can also customize your game settings and preferences according to your liking. You can also enjoy the game without any ads or interruptions.

      -

      Punball Mod APK is easy to download and install on your Android device. You just need to follow a few simple steps and you will be ready to play the game in no time. However, you should be careful about where you download the modded version of the game, as some sources might contain viruses or malware that can harm your device or steal your personal information. You should only download Punball Mod APK from trusted and reliable sources, such as [this one].

      -

      How to Download Punball Mod APK?

      -

      If you want to download Punball Mod APK, you can follow these steps:

      -
1. Click on the link [here] to go to the download page of Punball Mod APK.
2. Tap on the download button and wait for the file to be downloaded on your device.
3. Go to your device settings and enable the option to install apps from unknown sources.
4. Locate the downloaded file in your file manager and tap on it to start the installation process.
5. Follow the instructions on the screen and wait for the installation to be completed.
6. Launch the game and enjoy playing Punball Mod APK.

      Note: You might need to uninstall the original version of Punball before installing the modded version, as they might conflict with each other. You should also make sure that your device has enough storage space and meets the minimum requirements to run the game smoothly.
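Before you tap the file in step 4, you can also sanity-check the download from a computer. The sketch below is one way to do that, under assumptions: it uses apksigner from the Android SDK build-tools (assumed to be installed and on PATH), and the file name is hypothetical. A modded APK will not carry the original developer's certificate, but a file that fails signature verification entirely is a clear warning sign.

```python
# Minimal sketch: check that the downloaded file is a validly signed APK and print
# which certificate signed it. Assumptions: Android SDK build-tools on PATH
# (on Windows the command is apksigner.bat), and a hypothetical file name.
import subprocess

APK = "punball-mod.apk"  # hypothetical file name

def verify_apk(apk_path: str) -> None:
    # Prints the signing certificate(s) and fails if the signature does not verify.
    subprocess.run(["apksigner", "verify", "--print-certs", apk_path], check=True)

verify_apk(APK)
```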

      -


      What are Some Tips and Tricks for Playing Punball?

      -

      Punball is a fun and easy game to play, but it can also be challenging and addictive. If you want to improve your skills and performance in the game, you might want to follow some tips and tricks that can help you out. Here are some of them:

| Tip | Explanation |
|-----|-------------|
| Aim carefully | The direction and speed of your shots can make a big difference in hitting your targets and scoring points. You should aim carefully and adjust your swipe according to the situation. You can also use the walls and obstacles to bounce your balls and hit multiple enemies at once. |
| Use skills wisely | You have access to different skills that can help you in various ways, such as freezing enemies, increasing damage, or creating explosions. You should use these skills wisely and strategically, as they have cooldown times and limited uses. You should also upgrade your skills regularly to make them more effective. |
| Collect items | You can find various items in the game that can give you extra benefits, such as coins, gems, health, or power-ups. You should collect these items whenever you see them, as they can help you in different ways. You can also use these items to buy new balls, costumes, or accessories for your character. |
| Complete missions | The game has various missions that you can complete to earn rewards, such as coins, gems, or stars. These missions can range from defeating a certain number of enemies, using a certain skill, or reaching a certain score. You should try to complete these missions as much as possible, as they can help you progress faster in the game. |
| Play with friends | The game has a multiplayer mode that allows you to play with other players online or offline. You can either cooperate with them or compete against them in different modes and levels. Playing with friends can make the game more fun and exciting, as well as give you more chances to earn rewards and bonuses. |

      Conclusion

      -

      Punball is a fun and magical game that you can play on your Android device. It is a casual arcade game that combines pinball and magic. You play as a female wizard who has to protect her planet from evil invaders by shooting magical balls at them. The game has simple controls, colorful graphics, and addictive gameplay.

      -

      If you want to enjoy the game to its fullest potential, you should download Punball Mod APK instead of the original version. Punball Mod APK is a modified version of the game that gives you unlimited money, gems, skills, and menu options. This means that you can unlock all the features of the game without spending any real money or waiting for long hours. You can also customize your game settings and preferences according to your liking. You can also enjoy the game without any ads or interruptions.

      -

      Punball Mod APK is easy to download and install on your Android device. You just need to follow a few simple steps and you will be ready to play the game in no time. However, you should be careful about where you download the modded version of the game, as some sources might contain viruses or malware that can harm your device or steal your personal information. You should only download Punball Mod APK from trusted and reliable sources, such as [this one].
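One extra precaution, beyond an antivirus scan, is to confirm that the file you received matches the one the source actually published. The sketch below computes a SHA-256 checksum you could compare against a hash listed on the download page; this assumes the page publishes such a hash at all, and the file name is hypothetical.

```python
# Minimal sketch: compute the SHA-256 checksum of the downloaded file so it can be
# compared against a hash published by the download page, if one is published.
import hashlib
from pathlib import Path

APK = Path("punball-mod.apk")  # hypothetical file name

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(APK.name, sha256_of(APK))
```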

      -

      If you want to improve your skills and performance in the game, you should follow some tips and tricks that can help you out. You should aim carefully, use skills wisely, collect items, complete missions, and play with friends. These tips and tricks can make the game more fun and exciting, as well as help you progress faster in the game.

      -


      So what are you waiting for? Download Punball Mod APK now and enjoy playing this amazing game. You will not regret it!

      -

      FAQs

      -

      Here are some frequently asked questions and answers about the game:

      -
        -
      1. Q: Is Punball Mod APK safe to download and install?
      2. -
      3. A: Yes, Punball Mod APK is safe to download and install, as long as you download it from a trusted and reliable source, such as [this one]. However, you should always be careful about downloading any modded version of any game, as some sources might contain viruses or malware that can harm your device or steal your personal information. You should also scan the file with an antivirus software before installing it.
      4. -
      5. Q: Do I need to root my device to use Punball Mod APK?
      6. -
      7. A: No, you do not need to root your device to use Punball Mod APK. You just need to enable the option to install apps from unknown sources in your device settings.
      8. -
      9. Q: Can I play Punball Mod APK offline?
      10. -
      11. A: Yes, you can play Punball Mod APK offline, as the game does not require an internet connection to run. However, you might need an internet connection to access some features or modes of the game, such as multiplayer mode or online leaderboards.
      12. -
      13. Q: Can I update Punball Mod APK?
      14. -
      15. A: Yes, you can update Punball Mod APK whenever there is a new version available. However, you might need to uninstall the previous version of the modded game before installing the new one, as they might conflict with each other. You should also backup your game data before updating, as you might lose your progress or settings.
      16. -
      17. Q: How can I contact the developer of Punball?
      18. -
      19. A: You can contact the developer of Punball by visiting their official website [here] or by sending them an email at [this address]. You can also follow them on their social media accounts [here] and [here]. You can also leave a review or a comment on their Google Play Store page [here].
      20. -

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Shadow Fight 2 Special Edition APK MOD Unlimited Money and Eastern Adventure.md b/spaces/congsaPfin/Manga-OCR/logs/Shadow Fight 2 Special Edition APK MOD Unlimited Money and Eastern Adventure.md deleted file mode 100644 index 6ddbc3fca610c7ac34d816c14f7302191055d53e..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Shadow Fight 2 Special Edition APK MOD Unlimited Money and Eastern Adventure.md +++ /dev/null @@ -1,104 +0,0 @@ - -

      Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito Download

      -

      If you are a fan of fighting games, you must have heard of Shadow Fight 2, one of the most popular and successful games in this genre. But did you know that there is a special edition of this game that offers more features, more fun, and more challenges? In this article, we will tell you everything you need to know about Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito, how to download and install it, and why you should play it. Let's get started!

      -

      shadow fight 2 special edition apk mod dinheiro infinito download


      Download File ————— https://urlca.com/2uOajr



      -

      What is Shadow Fight 2 Special Edition?

      -

      Shadow Fight 2 Special Edition is a premium version of the famous fighting game from the studio NEKKI with a new storyline. In this game, you play as Sensei, a legendary warrior who must defeat Titan, the ruler of the Shadow World, and free the land of the Shadows from his tyranny. You will travel to new locations with an eastern atmosphere, face new enemies, and wield new weapons. You will also enjoy smoother gameplay with no ads and no energy system to limit your progress.

      -

      Features of Shadow Fight 2 Special Edition

      -

      Shadow Fight 2 Special Edition has many features that make it stand out from the original game. Here are some of them:

      -

      Premium version with no ads and energy

      -

      One of the main advantages of Shadow Fight 2 Special Edition is that it is a paid game with no ads and no energy system to interrupt your gameplay. You can play as much as you want without any distractions or limitations. You can also access all the content and features of the game without any extra cost.

      -

      New storyline with Sensei and Titan

      -

      Another feature of Shadow Fight 2 Special Edition is that it has a new storyline that follows the adventures of Sensei, the master of Shadow Fight 2's main character. You will learn more about his past, his motives, and his relationship with Titan, the ultimate enemy of the game. You will also discover new secrets and mysteries about the Shadow World and its inhabitants.

      -

      New locations and weapons

      -

      Shadow Fight 2 Special Edition also offers new locations and weapons that add more variety and excitement to your gameplay. You will explore different regions of the Shadow World, such as the Old Wasteland, the Underworld, and the Gates of Shadows. You will also use new weapons, such as kusarigama, nunchaku, sai, and katana, each with its own unique moves and combos.

      -

      How to download and install Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito?

      -

      If you want to download and install Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito, you need to follow these steps:

      -

      Requirements and compatibility

      -

      Before you download and install Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito, you need to make sure that your device meets these requirements:

      -
        -
      • Android version: 4.1 or higher
      • -
      • RAM: 1 GB or more
      • -
      • Storage space: 200 MB or more
      • -
      • Internet connection: required for some features
      • -
      -

      You also need to enable unknown sources in your device settings to allow the installation of third-party apps.

      -

      Steps to download and install

      -

      After you have checked the requirements and compatibility, you can proceed to download and install Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito by following these steps:

      -
        -
      1. Click on this link to download the APK file of Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito: [Download Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito]
      2. -
      3. Wait for the download to finish and then locate the file on your device storage.
      4. -
      5. Tap on the file and follow the instructions to install the game on your device.
      6. -
      7. Launch the game and enjoy playing Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito.
      8. -
      -

      Tips and tricks to play Shadow Fight 2 Special Edition

      -

      If you want to play Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito like a pro, you need to know some tips and tricks that can help you improve your skills and win more battles. Here are some of them:

      -
        -
      • Learn the basic controls and moves of the game. You can use the virtual joystick to move, the punch and kick buttons to attack, and the block button to defend. You can also perform different combos by combining different buttons and directions.
      • -
      • Upgrade your weapons and armor regularly. You can use the money and gems that you earn from winning battles or from the mod to buy new equipment or improve your existing ones. You can also customize your weapons and armor with different enchantments and skins.
      • -
      • Use your special abilities wisely. You can activate your special abilities by filling up the blue bar at the top of the screen. You can choose from different abilities, such as shadow form, magic, ranged weapons, or perks. Each ability has its own advantages and disadvantages, so use them according to the situation.
      • -
      • Practice and challenge yourself. You can practice your skills by playing in survival mode, where you have to fight against endless waves of enemies. You can also challenge yourself by playing in hard mode, where you have to face stronger opponents with higher health and damage.
      • -
      -

      Why should you play Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito?

      -

      You might be wondering why you should play Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito instead of the original game or other fighting games. Well, here are some reasons why you should give it a try:

      -

      Unlimited money and gems

      -

      One of the best features of Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito is that it gives you unlimited money and gems that you can use to buy anything you want in the game. You don't have to worry about running out of resources or spending real money to get them. You can enjoy the game without any restrictions or limitations.

      -

      Enhanced graphics and sound effects

      -

      Another reason why you should play Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito is that it has enhanced graphics and sound effects that make the game more realistic and immersive. You will appreciate the details and animations of the characters, weapons, and environments. You will also hear the sounds of punches, kicks, slashes, and explosions that add more excitement and intensity to your gameplay.

      -

      Challenging and addictive gameplay

      -

      The last reason why you should play Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito is that it has a challenging and addictive gameplay that will keep you hooked for hours. You will face different enemies with different skills and styles, from ninjas and samurais to demons and titans. You will also have to master different weapons and abilities, from swords and axes to fireballs and shurikens. You will never get bored or tired of playing this game.

      -

      Conclusion

      -

      In conclusion, Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito is a great fighting game that offers more features, more fun, and more challenges than the original game. It has a new storyline with Sensei and Titan, a premium version with no ads and energy, new locations and weapons, unlimited money and gems, enhanced graphics and sound effects, and challenging and addictive gameplay. If you are a fan of fighting games, you should definitely download and install Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito on your device today.

      -

      FAQs

      -

      Here are some frequently asked questions about Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito:

      -
        -
      1. Is Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito safe to download and install?
        -Yes, Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito is safe to download and install, as long as you use the link provided in this article. The APK file is scanned and verified by antivirus software and does not contain any malware or viruses. However, you should always be careful when downloading and installing any third-party apps from unknown sources, as they may harm your device or compromise your privacy.
      2. -
      3. What is the difference between Shadow Fight 2 and Shadow Fight 2 Special Edition?
        -Shadow Fight 2 and Shadow Fight 2 Special Edition are both fighting games from the same studio, but they have some differences. Shadow Fight 2 is a free game that has ads and an energy system that limits your gameplay. Shadow Fight 2 Special Edition is a paid game that does not have any ads or energy system and offers more features, such as a new storyline, new locations, new weapons, and unlimited money and gems.
      4. -
      5. How can I get more money and gems in Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito?
        -You don't have to worry about getting more money and gems in Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito, as the mod gives you unlimited amounts of them. You can use them to buy and upgrade anything you want in the game, such as weapons, armor, abilities, and skins. You can also earn more money and gems by winning battles, completing quests, and opening chests.
      6. -
      7. How can I unlock all the weapons and locations in Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito?
        -You can unlock all the weapons and locations in Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito by progressing through the game's storyline. You will unlock new weapons and locations as you defeat new enemies and bosses. You can also use the money and gems that you have to buy new weapons and locations from the shop.
      8. -
      9. How can I contact the developers of Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito?
        -You can contact the developers of Shadow Fight 2 Special Edition APK Mod Dinheiro Infinito by visiting their official website or their social media pages. You can also send them an email or leave a review on their app store page. You can find their contact information below:

        -
          -
        • Website: [NEKKI - Official Website]
        • -
        • Email: support@nekki.mail.helpshift.com
        • -
        • Facebook: [NEKKI - Home | Facebook]
        • -
        • Twitter: [NEKKI (@nekki_com) | Twitter]
        • -
        -
      10. -

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/What is IJ Scan Utility and Why You Need It.md b/spaces/congsaPfin/Manga-OCR/logs/What is IJ Scan Utility and Why You Need It.md deleted file mode 100644 index 9f14fe622a100798ac0d88a55b87ce851a21326f..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/What is IJ Scan Utility and Why You Need It.md +++ /dev/null @@ -1,106 +0,0 @@ - -

      What is IJ Scan Utility APK and How to Use It?

      -

      Introduction

      -

      If you have a Canon printer or scanner and want to scan photos and documents easily, you might want to try IJ Scan Utility APK. This is a free photography app that allows you to connect your Canon device to your Android phone or tablet and scan images with just one click. In this article, we will explain what IJ Scan Utility APK is, what features it offers, how to download and install it, and how to use it.

      -

      What is IJ Scan Utility?

      -

      IJ Scan Utility is scanner software developed by Canon Inc. that works with Canon printers and scanners. It enables you to scan multiple documents at one time, scan images larger than the platen, edit images, attach images to email, and access online product information. It also lets you customize the output settings and stitch files together. It is one of Canon's official image-management tools for use with its devices.

      -

      ij scan utility apk


      Download Zip ––– https://urlca.com/2uObIB



      -

      What is an APK file?

      -

      An APK file is an Android Package file that contains all the files and code needed to install and run an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac. You can download APK files from various sources online, such as Google Play Store, third-party websites, or directly from the app developer. However, you need to enable the option to install apps from unknown sources on your device settings before you can install an APK file.
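      At the file level, an APK is simply a ZIP archive with a fixed internal layout. As a purely illustrative sketch (the file name below is hypothetical, and this is not part of IJ Scan Utility itself), you can confirm this with Python's standard library by listing the archive's entries:

```python
import zipfile

# Hypothetical path, for illustration only; point this at any .apk you already have.
apk_path = "example.apk"

# An APK is an ordinary ZIP archive, so zipfile can read it directly.
with zipfile.ZipFile(apk_path) as apk:
    for name in apk.namelist()[:10]:   # print the first few entries
        print(name)                    # typically AndroidManifest.xml, classes.dex, res/...
```

      This is only meant to show that an APK bundles the app's manifest, code, and resources into a single package; the actual installation is still performed by Android itself.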

      -

      Features of IJ Scan Utility APK

      -

      One-click scanning

      -

      One of the main features of IJ Scan Utility APK is that it allows you to scan photos and documents with just one click. You don't need to open any other app or program on your device or computer. You just need to launch the app, select the scan type, and click Scan. The app will automatically detect your Canon device and start scanning.

      -

      Customizable settings

      -

      You can also adjust the settings of your scan according to your preferences and needs. You can choose the resolution, color mode, file format, file name, save location, and more. You can also crop, rotate, enhance, or apply filters to your scanned image before saving or sharing it.

      -

      Image stitching

      -

      If you want to scan an image that is larger than the platen of your scanner, you can use the image stitching feature of IJ Scan Utility APK. This feature allows you to scan multiple parts of an image separately and then combine them into one seamless image. You can also use this feature to create panoramas or collages from multiple images.
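      The stitching itself happens inside the app, but to give a rough idea of how overlapping partial scans can be merged in code, here is a minimal, hedged sketch using OpenCV's stitcher (this is not Canon's implementation; the file names are placeholders, and it assumes the opencv-python package and scans that overlap):

```python
import cv2

# Placeholder file names for the partial scans of one large original.
paths = ["scan_left.png", "scan_right.png"]
images = [cv2.imread(p) for p in paths]

# SCANS mode is intended for flat documents rather than rotating-camera panoramas.
stitcher = cv2.Stitcher_create(cv2.Stitcher_SCANS)
status, combined = stitcher.stitch(images)

if status == cv2.Stitcher_OK:
    cv2.imwrite("combined.png", combined)  # one seamless image built from the parts
else:
    print("Stitching failed with status", status)
```

      In practice the app handles alignment and blending for you; the sketch above simply shows the general idea of combining overlapping images into one.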

      -

      Online product information

      -

      Another feature of IJ Scan Utility APK is that it gives you access to online product information for your Canon device. You can check the specifications, manuals, troubleshooting guides, FAQs, and more. You can also contact Canon support or register your product online.

      -

      How to Download and Install IJ Scan Utility APK

      -

      Find your printer or scanner model on Canon's website

      -

      To download and install IJ Scan Utility APK, you first need to find your printer or scanner model on Canon's official website. Note that not all Canon devices are compatible with IJ Scan Utility APK. You can check the compatibility list here. Once you find your model, click on it to go to the product page.

      -

      Download the MP driver for your device

      -

      On the product page, click on the Drivers & Downloads tab. Then, select Android as your operating system and click on the MP driver for your device. This driver contains the IJ Scan Utility APK file that you need to install on your device. Click on the Download button and save the file to your device or computer.

      -

      Run the file to install the scanner software

      -

      After downloading the file, you need to run it to install the scanner software on your device or computer. If you downloaded the file to your device, you need to enable the option to install apps from unknown sources in your device settings. Then, locate the file and tap on it to start the installation. Follow the instructions on the screen to complete the process. If you downloaded the file to your computer, you need to connect your device to your computer via a USB cable or Wi-Fi. Then, locate the file and double-click on it to start the installation. Follow the instructions on the screen to complete the process.

      -

      How to Use IJ Scan Utility APK

      -

      Launch the app from your device or computer

      -

      Once you have installed the scanner software, you can launch the app from your device or computer. If you are using your device, you can find the app icon on your home screen or app drawer. Tap on it to open the app. If you are using your computer, you can find the app icon on your desktop or start menu. Double-click on it to open the app.

      -

      Select the scan type and adjust the settings

      -

      When you open the app, you will see a list of scan types that you can choose from, such as Auto, Document, Photo, Custom, Stitch, and OCR. Select the scan type that suits your needs and adjust the settings accordingly. You can change the resolution, color mode, file format, file name, save location, and more. You can also crop, rotate, enhance, or apply filters to your image before scanning.

      -

      Click Scan to start scanning

      -

      After selecting the scan type and adjusting the settings, you can click Scan to start scanning. The app will automatically detect your Canon device and start scanning. You will see a preview of your scanned image on your device or computer screen. You can make any final adjustments if needed.

      -

      Save or share the scanned image

      -

      Once you are satisfied with your scanned image, you can save or share it. You can save it to your device or computer memory, cloud storage, or SD card. You can also share it via email, social media, messaging apps, or other apps that support image files.

      -

      Conclusion

      -

      IJ Scan Utility APK is a useful app that allows you to scan photos and documents easily with your Canon printer or scanner and your Android device or computer. It offers various features such as one-click scanning, customizable settings, image stitching, and online product information. It is easy to download and install from Canon's website and simple to use with a user-friendly interface. If you have a Canon device and want to scan images with ease, you should give IJ Scan Utility APK a try.

      -

      FAQs

      -
        -
      • What are the benefits of using IJ Scan Utility APK?
      • -
      • Some of the benefits of using IJ Scan Utility APK are:
          -
        • You can scan images with just one click without opening any other app or program.
        • -
        • You can customize the output settings and edit images before saving or sharing them.
        • -
        • You can scan images larger than the platen and stitch them together into one seamless image.
        • -
        • You can access online product information and support for your Canon device.
        • -
        -
      • -
      • Is IJ Scan Utility APK safe to use?
      • -
      • IJ Scan Utility APK is safe to use as long as you download it from Canon's official website or a trusted source online. You should avoid downloading it from unknown or suspicious websites that may contain malware or viruses that could harm your device or compromise your privacy.
      • -
      • How much space does IJ Scan Utility APK take up on my device?
      • -
      • The size of IJ Scan Utility APK may vary depending on your device model and operating system version. However, it is usually around 20 MB in size. You should make sure that you have enough free space on your device before downloading and installing the app. You can also delete the app if you don't need it anymore.
      • -
      • Can I use IJ Scan Utility APK with other devices besides Canon?
      • -
      • No, IJ Scan Utility APK is only compatible with Canon printers and scanners. You cannot use it with other devices from different brands or manufacturers. You should check the compatibility list of IJ Scan Utility APK before downloading and installing it.
      • -
      • Can I use IJ Scan Utility APK offline?
      • -
      • Yes, you can use IJ Scan Utility APK offline as long as you have already installed the app and connected your Canon device to your Android device or computer. However, some features of the app may require an internet connection, such as accessing online product information or sharing images via email or social media.
      • -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/visualization/optflow.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/visualization/optflow.py deleted file mode 100644 index b4c3ce980f9f6c74c85fe714aca1623a08ae7a8d..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/visualization/optflow.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from __future__ import division - -import numpy as np - -from annotator.mmpkg.mmcv.image import rgb2bgr -from annotator.mmpkg.mmcv.video import flowread -from .image import imshow - - -def flowshow(flow, win_name='', wait_time=0): - """Show optical flow. - - Args: - flow (ndarray or str): The optical flow to be displayed. - win_name (str): The window name. - wait_time (int): Value of waitKey param. - """ - flow = flowread(flow) - flow_img = flow2rgb(flow) - imshow(rgb2bgr(flow_img), win_name, wait_time) - - -def flow2rgb(flow, color_wheel=None, unknown_thr=1e6): - """Convert flow map to RGB image. - - Args: - flow (ndarray): Array of optical flow. - color_wheel (ndarray or None): Color wheel used to map flow field to - RGB colorspace. Default color wheel will be used if not specified. - unknown_thr (str): Values above this threshold will be marked as - unknown and thus ignored. - - Returns: - ndarray: RGB image that can be visualized. - """ - assert flow.ndim == 3 and flow.shape[-1] == 2 - if color_wheel is None: - color_wheel = make_color_wheel() - assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3 - num_bins = color_wheel.shape[0] - - dx = flow[:, :, 0].copy() - dy = flow[:, :, 1].copy() - - ignore_inds = ( - np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) | - (np.abs(dy) > unknown_thr)) - dx[ignore_inds] = 0 - dy[ignore_inds] = 0 - - rad = np.sqrt(dx**2 + dy**2) - if np.any(rad > np.finfo(float).eps): - max_rad = np.max(rad) - dx /= max_rad - dy /= max_rad - - rad = np.sqrt(dx**2 + dy**2) - angle = np.arctan2(-dy, -dx) / np.pi - - bin_real = (angle + 1) / 2 * (num_bins - 1) - bin_left = np.floor(bin_real).astype(int) - bin_right = (bin_left + 1) % num_bins - w = (bin_real - bin_left.astype(np.float32))[..., None] - flow_img = (1 - - w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :] - small_ind = rad <= 1 - flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind]) - flow_img[np.logical_not(small_ind)] *= 0.75 - - flow_img[ignore_inds, :] = 0 - - return flow_img - - -def make_color_wheel(bins=None): - """Build a color wheel. - - Args: - bins(list or tuple, optional): Specify the number of bins for each - color range, corresponding to six ranges: red -> yellow, - yellow -> green, green -> cyan, cyan -> blue, blue -> magenta, - magenta -> red. [15, 6, 4, 11, 13, 6] is used for default - (see Middlebury). - - Returns: - ndarray: Color wheel of shape (total_bins, 3). 
- """ - if bins is None: - bins = [15, 6, 4, 11, 13, 6] - assert len(bins) == 6 - - RY, YG, GC, CB, BM, MR = tuple(bins) - - ry = [1, np.arange(RY) / RY, 0] - yg = [1 - np.arange(YG) / YG, 1, 0] - gc = [0, 1, np.arange(GC) / GC] - cb = [0, 1 - np.arange(CB) / CB, 1] - bm = [np.arange(BM) / BM, 0, 1] - mr = [1, 0, 1 - np.arange(MR) / MR] - - num_bins = RY + YG + GC + CB + BM + MR - - color_wheel = np.zeros((3, num_bins), dtype=np.float32) - - col = 0 - for i, color in enumerate([ry, yg, gc, cb, bm, mr]): - for j in range(3): - color_wheel[j, col:col + bins[i]] = color[j] - col += bins[i] - - return color_wheel.T diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/projects/README.md b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/projects/README.md deleted file mode 100644 index 95afe7ff8c8a9bd2f56621fcc3c1bdac11c256a9..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/projects/README.md +++ /dev/null @@ -1,2 +0,0 @@ - -Projects live in the [`projects` directory](../../projects) under the root of this repository, but not here. diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/backbones/vit.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/backbones/vit.py deleted file mode 100644 index 59e4479650690e08cbc4cab9427aefda47c2116d..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/backbones/vit.py +++ /dev/null @@ -1,459 +0,0 @@ -"""Modified from https://github.com/rwightman/pytorch-image- -models/blob/master/timm/models/vision_transformer.py.""" - -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as cp -from annotator.uniformer.mmcv.cnn import (Conv2d, Linear, build_activation_layer, build_norm_layer, - constant_init, kaiming_init, normal_init) -from annotator.uniformer.mmcv.runner import _load_checkpoint -from annotator.uniformer.mmcv.utils.parrots_wrapper import _BatchNorm - -from annotator.uniformer.mmseg.utils import get_root_logger -from ..builder import BACKBONES -from ..utils import DropPath, trunc_normal_ - - -class Mlp(nn.Module): - """MLP layer for Encoder block. - - Args: - in_features(int): Input dimension for the first fully - connected layer. - hidden_features(int): Output dimension for the first fully - connected layer. - out_features(int): Output dementsion for the second fully - connected layer. - act_cfg(dict): Config dict for activation layer. - Default: dict(type='GELU'). - drop(float): Drop rate for the dropout layer. Dropout rate has - to be between 0 and 1. Default: 0. - """ - - def __init__(self, - in_features, - hidden_features=None, - out_features=None, - act_cfg=dict(type='GELU'), - drop=0.): - super(Mlp, self).__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = Linear(in_features, hidden_features) - self.act = build_activation_layer(act_cfg) - self.fc2 = Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class Attention(nn.Module): - """Attention layer for Encoder block. - - Args: - dim (int): Dimension for the input vector. 
- num_heads (int): Number of parallel attention heads. - qkv_bias (bool): Enable bias for qkv if True. Default: False. - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. - attn_drop (float): Drop rate for attention output weights. - Default: 0. - proj_drop (float): Drop rate for output weights. Default: 0. - """ - - def __init__(self, - dim, - num_heads=8, - qkv_bias=False, - qk_scale=None, - attn_drop=0., - proj_drop=0.): - super(Attention, self).__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - def forward(self, x): - b, n, c = x.shape - qkv = self.qkv(x).reshape(b, n, 3, self.num_heads, - c // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(b, n, c) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class Block(nn.Module): - """Implements encoder block with residual connection. - - Args: - dim (int): The feature dimension. - num_heads (int): Number of parallel attention heads. - mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. - drop (float): Drop rate for mlp output weights. Default: 0. - attn_drop (float): Drop rate for attention output weights. - Default: 0. - proj_drop (float): Drop rate for attn layer output weights. - Default: 0. - drop_path (float): Drop rate for paths of model. - Default: 0. - act_cfg (dict): Config dict for activation layer. - Default: dict(type='GELU'). - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN', requires_grad=True). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - """ - - def __init__(self, - dim, - num_heads, - mlp_ratio=4, - qkv_bias=False, - qk_scale=None, - drop=0., - attn_drop=0., - proj_drop=0., - drop_path=0., - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN', eps=1e-6), - with_cp=False): - super(Block, self).__init__() - self.with_cp = with_cp - _, self.norm1 = build_norm_layer(norm_cfg, dim) - self.attn = Attention(dim, num_heads, qkv_bias, qk_scale, attn_drop, - proj_drop) - self.drop_path = DropPath( - drop_path) if drop_path > 0. else nn.Identity() - _, self.norm2 = build_norm_layer(norm_cfg, dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp( - in_features=dim, - hidden_features=mlp_hidden_dim, - act_cfg=act_cfg, - drop=drop) - - def forward(self, x): - - def _inner_forward(x): - out = x + self.drop_path(self.attn(self.norm1(x))) - out = out + self.drop_path(self.mlp(self.norm2(out))) - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -class PatchEmbed(nn.Module): - """Image to Patch Embedding. - - Args: - img_size (int | tuple): Input image size. - default: 224. - patch_size (int): Width and height for a patch. - default: 16. - in_channels (int): Input channels for images. Default: 3. - embed_dim (int): The embedding dimension. Default: 768. 
- """ - - def __init__(self, - img_size=224, - patch_size=16, - in_channels=3, - embed_dim=768): - super(PatchEmbed, self).__init__() - if isinstance(img_size, int): - self.img_size = (img_size, img_size) - elif isinstance(img_size, tuple): - self.img_size = img_size - else: - raise TypeError('img_size must be type of int or tuple') - h, w = self.img_size - self.patch_size = (patch_size, patch_size) - self.num_patches = (h // patch_size) * (w // patch_size) - self.proj = Conv2d( - in_channels, embed_dim, kernel_size=patch_size, stride=patch_size) - - def forward(self, x): - return self.proj(x).flatten(2).transpose(1, 2) - - -@BACKBONES.register_module() -class VisionTransformer(nn.Module): - """Vision transformer backbone. - - A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for - Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 - - Args: - img_size (tuple): input image size. Default: (224, 224). - patch_size (int, tuple): patch size. Default: 16. - in_channels (int): number of input channels. Default: 3. - embed_dim (int): embedding dimension. Default: 768. - depth (int): depth of transformer. Default: 12. - num_heads (int): number of attention heads. Default: 12. - mlp_ratio (int): ratio of mlp hidden dim to embedding dim. - Default: 4. - out_indices (list | tuple | int): Output from which stages. - Default: -1. - qkv_bias (bool): enable bias for qkv if True. Default: True. - qk_scale (float): override default qk scale of head_dim ** -0.5 if set. - drop_rate (float): dropout rate. Default: 0. - attn_drop_rate (float): attention dropout rate. Default: 0. - drop_path_rate (float): Rate of DropPath. Default: 0. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN', eps=1e-6, requires_grad=True). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='GELU'). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - final_norm (bool): Whether to add a additional layer to normalize - final feature map. Default: False. - interpolate_mode (str): Select the interpolate mode for position - embeding vector resize. Default: bicubic. - with_cls_token (bool): If concatenating class token into image tokens - as transformer input. Default: True. - with_cp (bool): Use checkpoint or not. Using checkpoint - will save some memory while slowing down the training speed. - Default: False. 
- """ - - def __init__(self, - img_size=(224, 224), - patch_size=16, - in_channels=3, - embed_dim=768, - depth=12, - num_heads=12, - mlp_ratio=4, - out_indices=11, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0., - norm_cfg=dict(type='LN', eps=1e-6, requires_grad=True), - act_cfg=dict(type='GELU'), - norm_eval=False, - final_norm=False, - with_cls_token=True, - interpolate_mode='bicubic', - with_cp=False): - super(VisionTransformer, self).__init__() - self.img_size = img_size - self.patch_size = patch_size - self.features = self.embed_dim = embed_dim - self.patch_embed = PatchEmbed( - img_size=img_size, - patch_size=patch_size, - in_channels=in_channels, - embed_dim=embed_dim) - - self.with_cls_token = with_cls_token - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) - self.pos_embed = nn.Parameter( - torch.zeros(1, self.patch_embed.num_patches + 1, embed_dim)) - self.pos_drop = nn.Dropout(p=drop_rate) - - if isinstance(out_indices, int): - self.out_indices = [out_indices] - elif isinstance(out_indices, list) or isinstance(out_indices, tuple): - self.out_indices = out_indices - else: - raise TypeError('out_indices must be type of int, list or tuple') - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth) - ] # stochastic depth decay rule - self.blocks = nn.ModuleList([ - Block( - dim=embed_dim, - num_heads=num_heads, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=dpr[i], - attn_drop=attn_drop_rate, - act_cfg=act_cfg, - norm_cfg=norm_cfg, - with_cp=with_cp) for i in range(depth) - ]) - - self.interpolate_mode = interpolate_mode - self.final_norm = final_norm - if final_norm: - _, self.norm = build_norm_layer(norm_cfg, embed_dim) - - self.norm_eval = norm_eval - self.with_cp = with_cp - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = get_root_logger() - checkpoint = _load_checkpoint(pretrained, logger=logger) - if 'state_dict' in checkpoint: - state_dict = checkpoint['state_dict'] - else: - state_dict = checkpoint - - if 'pos_embed' in state_dict.keys(): - if self.pos_embed.shape != state_dict['pos_embed'].shape: - logger.info(msg=f'Resize the pos_embed shape from \ -{state_dict["pos_embed"].shape} to {self.pos_embed.shape}') - h, w = self.img_size - pos_size = int( - math.sqrt(state_dict['pos_embed'].shape[1] - 1)) - state_dict['pos_embed'] = self.resize_pos_embed( - state_dict['pos_embed'], (h, w), (pos_size, pos_size), - self.patch_size, self.interpolate_mode) - - self.load_state_dict(state_dict, False) - - elif pretrained is None: - # We only implement the 'jax_impl' initialization implemented at - # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501 - trunc_normal_(self.pos_embed, std=.02) - trunc_normal_(self.cls_token, std=.02) - for n, m in self.named_modules(): - if isinstance(m, Linear): - trunc_normal_(m.weight, std=.02) - if m.bias is not None: - if 'mlp' in n: - normal_init(m.bias, std=1e-6) - else: - constant_init(m.bias, 0) - elif isinstance(m, Conv2d): - kaiming_init(m.weight, mode='fan_in') - if m.bias is not None: - constant_init(m.bias, 0) - elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): - constant_init(m.bias, 0) - constant_init(m.weight, 1.0) - else: - raise TypeError('pretrained must be a str or None') - - def _pos_embeding(self, img, patched_img, pos_embed): - """Positiong embeding method. 
- - Resize the pos_embed, if the input image size doesn't match - the training size. - Args: - img (torch.Tensor): The inference image tensor, the shape - must be [B, C, H, W]. - patched_img (torch.Tensor): The patched image, it should be - shape of [B, L1, C]. - pos_embed (torch.Tensor): The pos_embed weighs, it should be - shape of [B, L2, c]. - Return: - torch.Tensor: The pos encoded image feature. - """ - assert patched_img.ndim == 3 and pos_embed.ndim == 3, \ - 'the shapes of patched_img and pos_embed must be [B, L, C]' - x_len, pos_len = patched_img.shape[1], pos_embed.shape[1] - if x_len != pos_len: - if pos_len == (self.img_size[0] // self.patch_size) * ( - self.img_size[1] // self.patch_size) + 1: - pos_h = self.img_size[0] // self.patch_size - pos_w = self.img_size[1] // self.patch_size - else: - raise ValueError( - 'Unexpected shape of pos_embed, got {}.'.format( - pos_embed.shape)) - pos_embed = self.resize_pos_embed(pos_embed, img.shape[2:], - (pos_h, pos_w), self.patch_size, - self.interpolate_mode) - return self.pos_drop(patched_img + pos_embed) - - @staticmethod - def resize_pos_embed(pos_embed, input_shpae, pos_shape, patch_size, mode): - """Resize pos_embed weights. - - Resize pos_embed using bicubic interpolate method. - Args: - pos_embed (torch.Tensor): pos_embed weights. - input_shpae (tuple): Tuple for (input_h, intput_w). - pos_shape (tuple): Tuple for (pos_h, pos_w). - patch_size (int): Patch size. - Return: - torch.Tensor: The resized pos_embed of shape [B, L_new, C] - """ - assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' - input_h, input_w = input_shpae - pos_h, pos_w = pos_shape - cls_token_weight = pos_embed[:, 0] - pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] - pos_embed_weight = pos_embed_weight.reshape( - 1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2) - pos_embed_weight = F.interpolate( - pos_embed_weight, - size=[input_h // patch_size, input_w // patch_size], - align_corners=False, - mode=mode) - cls_token_weight = cls_token_weight.unsqueeze(1) - pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2) - pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1) - return pos_embed - - def forward(self, inputs): - B = inputs.shape[0] - - x = self.patch_embed(inputs) - - cls_tokens = self.cls_token.expand(B, -1, -1) - x = torch.cat((cls_tokens, x), dim=1) - x = self._pos_embeding(inputs, x, self.pos_embed) - - if not self.with_cls_token: - # Remove class token for transformer input - x = x[:, 1:] - - outs = [] - for i, blk in enumerate(self.blocks): - x = blk(x) - if i == len(self.blocks) - 1: - if self.final_norm: - x = self.norm(x) - if i in self.out_indices: - if self.with_cls_token: - # Remove class token and reshape token for decoder head - out = x[:, 1:] - else: - out = x - B, _, C = out.shape - out = out.reshape(B, inputs.shape[2] // self.patch_size, - inputs.shape[3] // self.patch_size, - C).permute(0, 3, 1, 2) - outs.append(out) - - return tuple(outs) - - def train(self, mode=True): - super(VisionTransformer, self).train(mode) - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, nn.LayerNorm): - m.eval() diff --git a/spaces/cpluoiudy00001/QQsign/Dockerfile b/spaces/cpluoiudy00001/QQsign/Dockerfile deleted file mode 100644 index 5b81d3b20c5bee450cf55a0ace7e5c95d58f72af..0000000000000000000000000000000000000000 --- a/spaces/cpluoiudy00001/QQsign/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM openjdk:11.0-jdk - -# 设置时区 -ENV TZ Asia/Shanghai - -# 设置工作目录 -WORKDIR /app - -# 
复制解压包和txlib到工作目录 -COPY unidbg-fetch-qsign /app -COPY txlib /app/txlib - -# 设置命令 -CMD bash bin/unidbg-fetch-qsign --host=0.0.0.0 --port=7860 --count=$COUNT --library=txlib/$TXLIB_VERSION --android_id=$ANDROID_ID - -# 暴露端口 -EXPOSE 7860 diff --git a/spaces/dariush-bahrami/color_transfer/references.md b/spaces/dariush-bahrami/color_transfer/references.md deleted file mode 100644 index 83592211c78197926818d1e89c07e9df111e7a05..0000000000000000000000000000000000000000 --- a/spaces/dariush-bahrami/color_transfer/references.md +++ /dev/null @@ -1 +0,0 @@ -This space is an [implementation](https://github.com/dariush-bahrami/reinhard_color_transfer) of ["Color transfer between images" by E. Reinhard et. al.](https://doi.org/10.1109/38.946629) \ No newline at end of file diff --git a/spaces/datastx/csv-analysis/app.py b/spaces/datastx/csv-analysis/app.py deleted file mode 100644 index 3ea52618bb85879e9c2404f0075bfae261c7aa3b..0000000000000000000000000000000000000000 --- a/spaces/datastx/csv-analysis/app.py +++ /dev/null @@ -1,20 +0,0 @@ -import streamlit as st -from dotenv import load_dotenv -from utils import query_agent - -load_dotenv() - - -st.title("Let's do some analysis on your CSV") -st.header("Please upload your CSV file here:") - -# Capture the CSV file -data = st.file_uploader("Upload CSV file",type="csv") - -query = st.text_area("Enter your query") -button = st.button("Generate Response") - -if button: - # Get Response - answer = query_agent(data,query) - st.write(answer) \ No newline at end of file diff --git a/spaces/davertor/colorizing_images/deoldify/filters.py b/spaces/davertor/colorizing_images/deoldify/filters.py deleted file mode 100644 index 762d046d6fddbdf9eaa47da451e48a3890978912..0000000000000000000000000000000000000000 --- a/spaces/davertor/colorizing_images/deoldify/filters.py +++ /dev/null @@ -1,120 +0,0 @@ -from numpy import ndarray -from abc import ABC, abstractmethod -from .critics import colorize_crit_learner -from fastai.core import * -from fastai.vision import * -from fastai.vision.image import * -from fastai.vision.data import * -from fastai import * -import math -from scipy import misc -import cv2 -from PIL import Image as PilImage - - -class IFilter(ABC): - @abstractmethod - def filter( - self, orig_image: PilImage, filtered_image: PilImage, render_factor: int - ) -> PilImage: - pass - - -class BaseFilter(IFilter): - def __init__(self, learn: Learner, stats: tuple = imagenet_stats): - super().__init__() - self.learn = learn - self.device = next(self.learn.model.parameters()).device - self.norm, self.denorm = normalize_funcs(*stats) - - def _transform(self, image: PilImage) -> PilImage: - return image - - def _scale_to_square(self, orig: PilImage, targ: int) -> PilImage: - # a simple stretch to fit a square really makes a big difference in rendering quality/consistency. - # I've tried padding to the square as well (reflect, symetric, constant, etc). Not as good! 
- targ_sz = (targ, targ) - return orig.resize(targ_sz, resample=PIL.Image.BILINEAR) - - def _get_model_ready_image(self, orig: PilImage, sz: int) -> PilImage: - result = self._scale_to_square(orig, sz) - result = self._transform(result) - return result - - def _model_process(self, orig: PilImage, sz: int) -> PilImage: - model_image = self._get_model_ready_image(orig, sz) - x = pil2tensor(model_image, np.float32) - x = x.to(self.device) - x.div_(255) - x, y = self.norm((x, x), do_x=True) - - try: - result = self.learn.pred_batch( - ds_type=DatasetType.Valid, batch=(x[None], y[None]), reconstruct=True - ) - except RuntimeError as rerr: - if 'memory' not in str(rerr): - raise rerr - print('Warning: render_factor was set too high, and out of memory error resulted. Returning original image.') - return model_image - - out = result[0] - out = self.denorm(out.px, do_x=False) - out = image2np(out * 255).astype(np.uint8) - return PilImage.fromarray(out) - - def _unsquare(self, image: PilImage, orig: PilImage) -> PilImage: - targ_sz = orig.size - image = image.resize(targ_sz, resample=PIL.Image.BILINEAR) - return image - - -class ColorizerFilter(BaseFilter): - def __init__(self, learn: Learner, stats: tuple = imagenet_stats): - super().__init__(learn=learn, stats=stats) - self.render_base = 16 - - def filter( - self, orig_image: PilImage, filtered_image: PilImage, render_factor: int, post_process: bool = True) -> PilImage: - render_sz = render_factor * self.render_base - model_image = self._model_process(orig=filtered_image, sz=render_sz) - raw_color = self._unsquare(model_image, orig_image) - - if post_process: - return self._post_process(raw_color, orig_image) - else: - return raw_color - - def _transform(self, image: PilImage) -> PilImage: - return image.convert('LA').convert('RGB') - - # This takes advantage of the fact that human eyes are much less sensitive to - # imperfections in chrominance compared to luminance. This means we can - # save a lot on memory and processing in the model, yet get a great high - # resolution result at the end. 
This is primarily intended just for - # inference - def _post_process(self, raw_color: PilImage, orig: PilImage) -> PilImage: - color_np = np.asarray(raw_color) - orig_np = np.asarray(orig) - color_yuv = cv2.cvtColor(color_np, cv2.COLOR_BGR2YUV) - # do a black and white transform first to get better luminance values - orig_yuv = cv2.cvtColor(orig_np, cv2.COLOR_BGR2YUV) - hires = np.copy(orig_yuv) - hires[:, :, 1:3] = color_yuv[:, :, 1:3] - final = cv2.cvtColor(hires, cv2.COLOR_YUV2BGR) - final = PilImage.fromarray(final) - return final - - -class MasterFilter(BaseFilter): - def __init__(self, filters: [IFilter], render_factor: int): - self.filters = filters - self.render_factor = render_factor - - def filter( - self, orig_image: PilImage, filtered_image: PilImage, render_factor: int = None, post_process: bool = True) -> PilImage: - render_factor = self.render_factor if render_factor is None else render_factor - for filter in self.filters: - filtered_image = filter.filter(orig_image, filtered_image, render_factor, post_process) - - return filtered_image diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/misc/vector.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/misc/vector.py deleted file mode 100644 index 666ff15cf8be0a0c17de4f86b74584d2bb27244f..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/misc/vector.py +++ /dev/null @@ -1,148 +0,0 @@ -from numbers import Number -import math -import operator -import warnings - - -__all__ = ["Vector"] - - -class Vector(tuple): - - """A math-like vector. - - Represents an n-dimensional numeric vector. ``Vector`` objects support - vector addition and subtraction, scalar multiplication and division, - negation, rounding, and comparison tests. 
- """ - - __slots__ = () - - def __new__(cls, values, keep=False): - if keep is not False: - warnings.warn( - "the 'keep' argument has been deprecated", - DeprecationWarning, - ) - if type(values) == Vector: - # No need to create a new object - return values - return super().__new__(cls, values) - - def __repr__(self): - return f"{self.__class__.__name__}({super().__repr__()})" - - def _vectorOp(self, other, op): - if isinstance(other, Vector): - assert len(self) == len(other) - return self.__class__(op(a, b) for a, b in zip(self, other)) - if isinstance(other, Number): - return self.__class__(op(v, other) for v in self) - raise NotImplementedError() - - def _scalarOp(self, other, op): - if isinstance(other, Number): - return self.__class__(op(v, other) for v in self) - raise NotImplementedError() - - def _unaryOp(self, op): - return self.__class__(op(v) for v in self) - - def __add__(self, other): - return self._vectorOp(other, operator.add) - - __radd__ = __add__ - - def __sub__(self, other): - return self._vectorOp(other, operator.sub) - - def __rsub__(self, other): - return self._vectorOp(other, _operator_rsub) - - def __mul__(self, other): - return self._scalarOp(other, operator.mul) - - __rmul__ = __mul__ - - def __truediv__(self, other): - return self._scalarOp(other, operator.truediv) - - def __rtruediv__(self, other): - return self._scalarOp(other, _operator_rtruediv) - - def __pos__(self): - return self._unaryOp(operator.pos) - - def __neg__(self): - return self._unaryOp(operator.neg) - - def __round__(self, *, round=round): - return self._unaryOp(round) - - def __eq__(self, other): - if isinstance(other, list): - # bw compat Vector([1, 2, 3]) == [1, 2, 3] - other = tuple(other) - return super().__eq__(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __bool__(self): - return any(self) - - __nonzero__ = __bool__ - - def __abs__(self): - return math.sqrt(sum(x * x for x in self)) - - def length(self): - """Return the length of the vector. 
Equivalent to abs(vector).""" - return abs(self) - - def normalized(self): - """Return the normalized vector of the vector.""" - return self / abs(self) - - def dot(self, other): - """Performs vector dot product, returning the sum of - ``a[0] * b[0], a[1] * b[1], ...``""" - assert len(self) == len(other) - return sum(a * b for a, b in zip(self, other)) - - # Deprecated methods/properties - - def toInt(self): - warnings.warn( - "the 'toInt' method has been deprecated, use round(vector) instead", - DeprecationWarning, - ) - return self.__round__() - - @property - def values(self): - warnings.warn( - "the 'values' attribute has been deprecated, use " - "the vector object itself instead", - DeprecationWarning, - ) - return list(self) - - @values.setter - def values(self, values): - raise AttributeError( - "can't set attribute, the 'values' attribute has been deprecated", - ) - - def isclose(self, other: "Vector", **kwargs) -> bool: - """Return True if the vector is close to another Vector.""" - assert len(self) == len(other) - return all(math.isclose(a, b, **kwargs) for a, b in zip(self, other)) - - -def _operator_rsub(a, b): - return operator.sub(b, a) - - -def _operator_rtruediv(a, b): - return operator.truediv(b, a) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/voltLib/lexer.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/voltLib/lexer.py deleted file mode 100644 index 706b21bbb19717a32025e505c3ae4a2e5f2154ec..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/voltLib/lexer.py +++ /dev/null @@ -1,99 +0,0 @@ -from fontTools.voltLib.error import VoltLibError - - -class Lexer(object): - NUMBER = "NUMBER" - STRING = "STRING" - NAME = "NAME" - NEWLINE = "NEWLINE" - - CHAR_WHITESPACE_ = " \t" - CHAR_NEWLINE_ = "\r\n" - CHAR_DIGIT_ = "0123456789" - CHAR_UC_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - CHAR_LC_LETTER_ = "abcdefghijklmnopqrstuvwxyz" - CHAR_UNDERSCORE_ = "_" - CHAR_PERIOD_ = "." 
- CHAR_NAME_START_ = ( - CHAR_UC_LETTER_ + CHAR_LC_LETTER_ + CHAR_PERIOD_ + CHAR_UNDERSCORE_ - ) - CHAR_NAME_CONTINUATION_ = CHAR_NAME_START_ + CHAR_DIGIT_ - - def __init__(self, text, filename): - self.filename_ = filename - self.line_ = 1 - self.pos_ = 0 - self.line_start_ = 0 - self.text_ = text - self.text_length_ = len(text) - - def __iter__(self): - return self - - def next(self): # Python 2 - return self.__next__() - - def __next__(self): # Python 3 - while True: - token_type, token, location = self.next_() - if token_type not in {Lexer.NEWLINE}: - return (token_type, token, location) - - def location_(self): - column = self.pos_ - self.line_start_ + 1 - return (self.filename_ or "", self.line_, column) - - def next_(self): - self.scan_over_(Lexer.CHAR_WHITESPACE_) - location = self.location_() - start = self.pos_ - text = self.text_ - limit = len(text) - if start >= limit: - raise StopIteration() - cur_char = text[start] - next_char = text[start + 1] if start + 1 < limit else None - - if cur_char == "\n": - self.pos_ += 1 - self.line_ += 1 - self.line_start_ = self.pos_ - return (Lexer.NEWLINE, None, location) - if cur_char == "\r": - self.pos_ += 2 if next_char == "\n" else 1 - self.line_ += 1 - self.line_start_ = self.pos_ - return (Lexer.NEWLINE, None, location) - if cur_char == '"': - self.pos_ += 1 - self.scan_until_('"\r\n') - if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': - self.pos_ += 1 - return (Lexer.STRING, text[start + 1 : self.pos_ - 1], location) - else: - raise VoltLibError("Expected '\"' to terminate string", location) - if cur_char in Lexer.CHAR_NAME_START_: - self.pos_ += 1 - self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - token = text[start : self.pos_] - return (Lexer.NAME, token, location) - if cur_char in Lexer.CHAR_DIGIT_: - self.scan_over_(Lexer.CHAR_DIGIT_) - return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: - self.pos_ += 1 - self.scan_over_(Lexer.CHAR_DIGIT_) - return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) - raise VoltLibError("Unexpected character: '%s'" % cur_char, location) - - def scan_over_(self, valid): - p = self.pos_ - while p < self.text_length_ and self.text_[p] in valid: - p += 1 - self.pos_ = p - - def scan_until_(self, stop_at): - p = self.pos_ - while p < self.text_length_ and self.text_[p] not in stop_at: - p += 1 - self.pos_ = p diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-dc375626.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-dc375626.css deleted file mode 100644 index 509df412c87c1270046f47ded1f979f3df656dc8..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-dc375626.css +++ /dev/null @@ -1 +0,0 @@ -label.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70{display:flex;align-items:center;cursor:pointer;color:var(--body-text-color);font-weight:var(--checkbox-label-text-weight);font-size:var(--checkbox-label-text-size);line-height:var(--line-md)}label.svelte-1ojmf70>.svelte-1ojmf70+.svelte-1ojmf70{margin-left:var(--size-2)}input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70{--ring-color:transparent;position:relative;box-shadow:var(--input-shadow);border:1px solid 
var(--checkbox-border-color);border-radius:var(--checkbox-border-radius);background-color:var(--checkbox-background-color);line-height:var(--line-sm)}input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70:checked,input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70:checked:hover,input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70:checked:focus{border-color:var(--checkbox-border-color-selected);background-image:var(--checkbox-check);background-color:var(--checkbox-background-color-selected)}input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70:hover{border-color:var(--checkbox-border-color-hover);background-color:var(--checkbox-background-color-hover)}input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70:focus{border-color:var(--checkbox-border-color-focus);background-color:var(--checkbox-background-color-focus)}input[disabled].svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70,.disabled.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70{cursor:not-allowed} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/jinja2/sandbox.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/jinja2/sandbox.py deleted file mode 100644 index 06d74148eccea79d1f5a0ca2fb76ecc246f87d62..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/jinja2/sandbox.py +++ /dev/null @@ -1,428 +0,0 @@ -"""A sandbox layer that ensures unsafe operations cannot be performed. -Useful when the template itself comes from an untrusted source. -""" -import operator -import types -import typing as t -from _string import formatter_field_name_split # type: ignore -from collections import abc -from collections import deque -from string import Formatter - -from markupsafe import EscapeFormatter -from markupsafe import Markup - -from .environment import Environment -from .exceptions import SecurityError -from .runtime import Context -from .runtime import Undefined - -F = t.TypeVar("F", bound=t.Callable[..., t.Any]) - -#: maximum number of items a range may produce -MAX_RANGE = 100000 - -#: Unsafe function attributes. -UNSAFE_FUNCTION_ATTRIBUTES: t.Set[str] = set() - -#: Unsafe method attributes. Function attributes are unsafe for methods too. -UNSAFE_METHOD_ATTRIBUTES: t.Set[str] = set() - -#: unsafe generator attributes. -UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"} - -#: unsafe attributes on coroutines -UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"} - -#: unsafe attributes on async generators -UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"} - -_mutable_spec: t.Tuple[t.Tuple[t.Type, t.FrozenSet[str]], ...] 
= ( - ( - abc.MutableSet, - frozenset( - [ - "add", - "clear", - "difference_update", - "discard", - "pop", - "remove", - "symmetric_difference_update", - "update", - ] - ), - ), - ( - abc.MutableMapping, - frozenset(["clear", "pop", "popitem", "setdefault", "update"]), - ), - ( - abc.MutableSequence, - frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]), - ), - ( - deque, - frozenset( - [ - "append", - "appendleft", - "clear", - "extend", - "extendleft", - "pop", - "popleft", - "remove", - "rotate", - ] - ), - ), -) - - -def inspect_format_method(callable: t.Callable) -> t.Optional[str]: - if not isinstance( - callable, (types.MethodType, types.BuiltinMethodType) - ) or callable.__name__ not in ("format", "format_map"): - return None - - obj = callable.__self__ - - if isinstance(obj, str): - return obj - - return None - - -def safe_range(*args: int) -> range: - """A range that can't generate ranges with a length of more than - MAX_RANGE items. - """ - rng = range(*args) - - if len(rng) > MAX_RANGE: - raise OverflowError( - "Range too big. The sandbox blocks ranges larger than" - f" MAX_RANGE ({MAX_RANGE})." - ) - - return rng - - -def unsafe(f: F) -> F: - """Marks a function or method as unsafe. - - .. code-block: python - - @unsafe - def delete(self): - pass - """ - f.unsafe_callable = True # type: ignore - return f - - -def is_internal_attribute(obj: t.Any, attr: str) -> bool: - """Test if the attribute given is an internal python attribute. For - example this function returns `True` for the `func_code` attribute of - python objects. This is useful if the environment method - :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden. - - >>> from jinja2.sandbox import is_internal_attribute - >>> is_internal_attribute(str, "mro") - True - >>> is_internal_attribute(str, "upper") - False - """ - if isinstance(obj, types.FunctionType): - if attr in UNSAFE_FUNCTION_ATTRIBUTES: - return True - elif isinstance(obj, types.MethodType): - if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES: - return True - elif isinstance(obj, type): - if attr == "mro": - return True - elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)): - return True - elif isinstance(obj, types.GeneratorType): - if attr in UNSAFE_GENERATOR_ATTRIBUTES: - return True - elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType): - if attr in UNSAFE_COROUTINE_ATTRIBUTES: - return True - elif hasattr(types, "AsyncGeneratorType") and isinstance( - obj, types.AsyncGeneratorType - ): - if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES: - return True - return attr.startswith("__") - - -def modifies_known_mutable(obj: t.Any, attr: str) -> bool: - """This function checks if an attribute on a builtin mutable object - (list, dict, set or deque) or the corresponding ABCs would modify it - if called. - - >>> modifies_known_mutable({}, "clear") - True - >>> modifies_known_mutable({}, "keys") - False - >>> modifies_known_mutable([], "append") - True - >>> modifies_known_mutable([], "index") - False - - If called with an unsupported object, ``False`` is returned. - - >>> modifies_known_mutable("foo", "upper") - False - """ - for typespec, unsafe in _mutable_spec: - if isinstance(obj, typespec): - return attr in unsafe - return False - - -class SandboxedEnvironment(Environment): - """The sandboxed environment. It works like the regular environment but - tells the compiler to generate sandboxed code. 
Additionally subclasses of - this environment may override the methods that tell the runtime what - attributes or functions are safe to access. - - If the template tries to access insecure code a :exc:`SecurityError` is - raised. However also other exceptions may occur during the rendering so - the caller has to ensure that all exceptions are caught. - """ - - sandboxed = True - - #: default callback table for the binary operators. A copy of this is - #: available on each instance of a sandboxed environment as - #: :attr:`binop_table` - default_binop_table: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = { - "+": operator.add, - "-": operator.sub, - "*": operator.mul, - "/": operator.truediv, - "//": operator.floordiv, - "**": operator.pow, - "%": operator.mod, - } - - #: default callback table for the unary operators. A copy of this is - #: available on each instance of a sandboxed environment as - #: :attr:`unop_table` - default_unop_table: t.Dict[str, t.Callable[[t.Any], t.Any]] = { - "+": operator.pos, - "-": operator.neg, - } - - #: a set of binary operators that should be intercepted. Each operator - #: that is added to this set (empty by default) is delegated to the - #: :meth:`call_binop` method that will perform the operator. The default - #: operator callback is specified by :attr:`binop_table`. - #: - #: The following binary operators are interceptable: - #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**`` - #: - #: The default operation form the operator table corresponds to the - #: builtin function. Intercepted calls are always slower than the native - #: operator call, so make sure only to intercept the ones you are - #: interested in. - #: - #: .. versionadded:: 2.6 - intercepted_binops: t.FrozenSet[str] = frozenset() - - #: a set of unary operators that should be intercepted. Each operator - #: that is added to this set (empty by default) is delegated to the - #: :meth:`call_unop` method that will perform the operator. The default - #: operator callback is specified by :attr:`unop_table`. - #: - #: The following unary operators are interceptable: ``+``, ``-`` - #: - #: The default operation form the operator table corresponds to the - #: builtin function. Intercepted calls are always slower than the native - #: operator call, so make sure only to intercept the ones you are - #: interested in. - #: - #: .. versionadded:: 2.6 - intercepted_unops: t.FrozenSet[str] = frozenset() - - def __init__(self, *args: t.Any, **kwargs: t.Any) -> None: - super().__init__(*args, **kwargs) - self.globals["range"] = safe_range - self.binop_table = self.default_binop_table.copy() - self.unop_table = self.default_unop_table.copy() - - def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool: - """The sandboxed environment will call this method to check if the - attribute of an object is safe to access. Per default all attributes - starting with an underscore are considered private as well as the - special attributes of internal python objects as returned by the - :func:`is_internal_attribute` function. - """ - return not (attr.startswith("_") or is_internal_attribute(obj, attr)) - - def is_safe_callable(self, obj: t.Any) -> bool: - """Check if an object is safely callable. By default callables - are considered safe unless decorated with :func:`unsafe`. - - This also recognizes the Django convention of setting - ``func.alters_data = True``. 
- """ - return not ( - getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False) - ) - - def call_binop( - self, context: Context, operator: str, left: t.Any, right: t.Any - ) -> t.Any: - """For intercepted binary operator calls (:meth:`intercepted_binops`) - this function is executed instead of the builtin operator. This can - be used to fine tune the behavior of certain operators. - - .. versionadded:: 2.6 - """ - return self.binop_table[operator](left, right) - - def call_unop(self, context: Context, operator: str, arg: t.Any) -> t.Any: - """For intercepted unary operator calls (:meth:`intercepted_unops`) - this function is executed instead of the builtin operator. This can - be used to fine tune the behavior of certain operators. - - .. versionadded:: 2.6 - """ - return self.unop_table[operator](arg) - - def getitem( - self, obj: t.Any, argument: t.Union[str, t.Any] - ) -> t.Union[t.Any, Undefined]: - """Subscribe an object from sandboxed code.""" - try: - return obj[argument] - except (TypeError, LookupError): - if isinstance(argument, str): - try: - attr = str(argument) - except Exception: - pass - else: - try: - value = getattr(obj, attr) - except AttributeError: - pass - else: - if self.is_safe_attribute(obj, argument, value): - return value - return self.unsafe_undefined(obj, argument) - return self.undefined(obj=obj, name=argument) - - def getattr(self, obj: t.Any, attribute: str) -> t.Union[t.Any, Undefined]: - """Subscribe an object from sandboxed code and prefer the - attribute. The attribute passed *must* be a bytestring. - """ - try: - value = getattr(obj, attribute) - except AttributeError: - try: - return obj[attribute] - except (TypeError, LookupError): - pass - else: - if self.is_safe_attribute(obj, attribute, value): - return value - return self.unsafe_undefined(obj, attribute) - return self.undefined(obj=obj, name=attribute) - - def unsafe_undefined(self, obj: t.Any, attribute: str) -> Undefined: - """Return an undefined object for unsafe attributes.""" - return self.undefined( - f"access to attribute {attribute!r} of" - f" {type(obj).__name__!r} object is unsafe.", - name=attribute, - obj=obj, - exc=SecurityError, - ) - - def format_string( - self, - s: str, - args: t.Tuple[t.Any, ...], - kwargs: t.Dict[str, t.Any], - format_func: t.Optional[t.Callable] = None, - ) -> str: - """If a format call is detected, then this is routed through this - method so that our safety sandbox can be used for it. - """ - formatter: SandboxedFormatter - if isinstance(s, Markup): - formatter = SandboxedEscapeFormatter(self, escape=s.escape) - else: - formatter = SandboxedFormatter(self) - - if format_func is not None and format_func.__name__ == "format_map": - if len(args) != 1 or kwargs: - raise TypeError( - "format_map() takes exactly one argument" - f" {len(args) + (kwargs is not None)} given" - ) - - kwargs = args[0] - args = () - - rv = formatter.vformat(s, args, kwargs) - return type(s)(rv) - - def call( - __self, # noqa: B902 - __context: Context, - __obj: t.Any, - *args: t.Any, - **kwargs: t.Any, - ) -> t.Any: - """Call an object from sandboxed code.""" - fmt = inspect_format_method(__obj) - if fmt is not None: - return __self.format_string(fmt, args, kwargs, __obj) - - # the double prefixes are to avoid double keyword argument - # errors when proxying the call. 
- if not __self.is_safe_callable(__obj): - raise SecurityError(f"{__obj!r} is not safely callable") - return __context.call(__obj, *args, **kwargs) - - -class ImmutableSandboxedEnvironment(SandboxedEnvironment): - """Works exactly like the regular `SandboxedEnvironment` but does not - permit modifications on the builtin mutable objects `list`, `set`, and - `dict` by using the :func:`modifies_known_mutable` function. - """ - - def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool: - if not super().is_safe_attribute(obj, attr, value): - return False - - return not modifies_known_mutable(obj, attr) - - -class SandboxedFormatter(Formatter): - def __init__(self, env: Environment, **kwargs: t.Any) -> None: - self._env = env - super().__init__(**kwargs) - - def get_field( - self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any] - ) -> t.Tuple[t.Any, str]: - first, rest = formatter_field_name_split(field_name) - obj = self.get_value(first, args, kwargs) - for is_attr, i in rest: - if is_attr: - obj = self._env.getattr(obj, i) - else: - obj = self._env.getitem(obj, i) - return obj, first - - -class SandboxedEscapeFormatter(SandboxedFormatter, EscapeFormatter): - pass diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/audioldm/__init__.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/audioldm/__init__.py deleted file mode 100644 index fa9ae5b8ca12e51ac82040cd9224d8a2fb583e0d..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/audioldm/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -from ...utils import ( - OptionalDependencyNotAvailable, - is_torch_available, - is_transformers_available, - is_transformers_version, -) - -# from .pipeline_audioldm import AudioLDMPipeline - -try: - if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( - AudioLDMPipeline, - ) -else: - from .pipeline_audioldm import AudioLDMPipeline diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py deleted file mode 100644 index 0e312c5e30138e106930421ad8c55c23f01e60e7..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -from dataclasses import dataclass -from enum import Enum -from typing import List, Optional, Union - -import numpy as np -import PIL -from PIL import Image - -from ...utils import BaseOutput, is_torch_available, is_transformers_available - - -@dataclass -class SemanticStableDiffusionPipelineOutput(BaseOutput): - """ - Output class for Stable Diffusion pipelines. - - Args: - images (`List[PIL.Image.Image]` or `np.ndarray`) - List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, - num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. - nsfw_content_detected (`List[bool]`) - List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, or `None` if safety checking could not be performed. 
- """ - - images: Union[List[PIL.Image.Image], np.ndarray] - nsfw_content_detected: Optional[List[bool]] - - -if is_transformers_available() and is_torch_available(): - from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline diff --git a/spaces/declare-lab/tango/diffusers/tests/pipelines/repaint/test_repaint.py b/spaces/declare-lab/tango/diffusers/tests/pipelines/repaint/test_repaint.py deleted file mode 100644 index 060e6c9161baab099bc11b3d843dd4b48f7e2fb6..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/tests/pipelines/repaint/test_repaint.py +++ /dev/null @@ -1,162 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import unittest - -import numpy as np -import torch - -from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel -from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device - -from ...pipeline_params import IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_INPAINTING_PARAMS -from ...test_pipelines_common import PipelineTesterMixin - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class RepaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = RePaintPipeline - params = IMAGE_INPAINTING_PARAMS - {"width", "height", "guidance_scale"} - required_optional_params = PipelineTesterMixin.required_optional_params - { - "latents", - "num_images_per_prompt", - "callback", - "callback_steps", - } - batch_params = IMAGE_INPAINTING_BATCH_PARAMS - test_cpu_offload = False - - def get_dummy_components(self): - torch.manual_seed(0) - torch.manual_seed(0) - unet = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - scheduler = RePaintScheduler() - components = {"unet": unet, "scheduler": scheduler} - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - image = np.random.RandomState(seed).standard_normal((1, 3, 32, 32)) - image = torch.from_numpy(image).to(device=device, dtype=torch.float32) - mask = (image > 0).to(device=device, dtype=torch.float32) - inputs = { - "image": image, - "mask_image": mask, - "generator": generator, - "num_inference_steps": 5, - "eta": 0.0, - "jump_length": 2, - "jump_n_sample": 2, - "output_type": "numpy", - } - return inputs - - def test_repaint(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = RePaintPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - - assert 
image.shape == (1, 32, 32, 3) - expected_slice = np.array([1.0000, 0.5426, 0.5497, 0.2200, 1.0000, 1.0000, 0.5623, 1.0000, 0.6274]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - - @skip_mps - def test_save_load_local(self): - return super().test_save_load_local() - - # RePaint can hardly be made deterministic since the scheduler is currently always - # nondeterministic - @unittest.skip("non-deterministic pipeline") - def test_inference_batch_single_identical(self): - return super().test_inference_batch_single_identical() - - @skip_mps - def test_dict_tuple_outputs_equivalent(self): - return super().test_dict_tuple_outputs_equivalent() - - @skip_mps - def test_save_load_optional_components(self): - return super().test_save_load_optional_components() - - @skip_mps - def test_attention_slicing_forward_pass(self): - return super().test_attention_slicing_forward_pass() - - -@nightly -@require_torch_gpu -class RepaintPipelineNightlyTests(unittest.TestCase): - def tearDown(self): - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_celebahq(self): - original_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" - "repaint/celeba_hq_256.png" - ) - mask_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png" - ) - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" - "repaint/celeba_hq_256_result.npy" - ) - - model_id = "google/ddpm-ema-celebahq-256" - unet = UNet2DModel.from_pretrained(model_id) - scheduler = RePaintScheduler.from_pretrained(model_id) - - repaint = RePaintPipeline(unet=unet, scheduler=scheduler).to(torch_device) - repaint.set_progress_bar_config(disable=None) - repaint.enable_attention_slicing() - - generator = torch.manual_seed(0) - output = repaint( - original_image, - mask_image, - num_inference_steps=250, - eta=0.0, - jump_length=10, - jump_n_sample=10, - generator=generator, - output_type="np", - ) - image = output.images[0] - - assert image.shape == (256, 256, 3) - assert np.abs(expected_image - image).mean() < 1e-2 diff --git a/spaces/devoworm-group/membrane_segmentation/setup.sh b/spaces/devoworm-group/membrane_segmentation/setup.sh deleted file mode 100644 index f0ab2585fe12edf5a8ea8eb3a8614ba23ed52e7f..0000000000000000000000000000000000000000 --- a/spaces/devoworm-group/membrane_segmentation/setup.sh +++ /dev/null @@ -1,8 +0,0 @@ -mkdir -p ~/.streamlit/ -echo "\ -[server]\n\ -headless = true\n\ -port = $PORT\n\ -enableCORS = false\n\ -\n\ -" > ~/.streamlit/config.toml \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Da Cor A Cor Inexistente Israel Pedrosa Pdf Download TOP.md b/spaces/diacanFperku/AutoGPT/Da Cor A Cor Inexistente Israel Pedrosa Pdf Download TOP.md deleted file mode 100644 index 5b4a0fc696842e15111af15854c10bed15d9c3d9..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Da Cor A Cor Inexistente Israel Pedrosa Pdf Download TOP.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Da Cor A Cor Inexistente Israel Pedrosa Pdf Download


      Download: https://gohhs.com/2uFUNw



      - -Thank you completely much for downloading steven spielberg. ... Rather than enjoying a good PDF past a mug of coffee in the afternoon, on the other ... vol 1, ici ofdm matlab code, predictable revenue, da cor or inexistente by israel pedrosa. 1fdad05405

      diff --git a/spaces/diacanFperku/AutoGPT/Filmeprivatedepierrewoomantensaoanalemcontinenteafricano.md b/spaces/diacanFperku/AutoGPT/Filmeprivatedepierrewoomantensaoanalemcontinenteafricano.md deleted file mode 100644 index 3f53dfb6b005407baac3f014f81230aada02d354..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Filmeprivatedepierrewoomantensaoanalemcontinenteafricano.md +++ /dev/null @@ -1,6 +0,0 @@ -

      filmeprivatedepierrewoomantensaoanalemcontinenteafricano


      Download Zip: https://gohhs.com/2uFTor



      - -filmeprivatedepierrewoomantensaoanalemcontinenteafricano · Free download Hear v1.0.1738 Serial Key Full Version. Download american ... 4d29de3e1b

      diff --git a/spaces/diacanFperku/AutoGPT/HD Online Player (Dunkirk (English) Hindi 720p Free Do).md b/spaces/diacanFperku/AutoGPT/HD Online Player (Dunkirk (English) Hindi 720p Free Do).md deleted file mode 100644 index fa878fd690360cf0c59fc0ff063165dc56e76b76..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/HD Online Player (Dunkirk (English) Hindi 720p Free Do).md +++ /dev/null @@ -1,34 +0,0 @@ - -

      How to Watch Dunkirk (2017) in Hindi Dubbed Online for Free


      Dunkirk is a 2017 war film directed by Christopher Nolan that depicts the evacuation of Allied soldiers from the beaches of Dunkirk, France, during World War II. The film stars Fionn Whitehead, Tom Hardy, Kenneth Branagh, Cillian Murphy, Mark Rylance, and Harry Styles. Dunkirk was praised by critics and audiences for its cinematography, direction, sound design, and score. It also received eight Oscar nominations, including Best Picture and Best Director.


      If you are a fan of war movies or want to watch a thrilling and realistic depiction of a historical event, you might be interested in watching Dunkirk online. However, if you prefer to watch it in the Hindi dubbed version, you might face some difficulties in finding a reliable and legal source. That's why we have prepared this guide to help you watch Dunkirk (2017) in Hindi dubbed online for free.


      HD Online Player (Dunkirk (English) hindi 720p free do)


      Download File 🆓 https://gohhs.com/2uFULP




      Where to Watch Dunkirk (2017) in Hindi Dubbed Online for Free


      There are several websites that claim to offer Dunkirk (2017) in Hindi dubbed online for free. However, most of them are either illegal, unsafe, or low-quality. Some of them might even contain malware or viruses that can harm your device or steal your personal information. Therefore, we advise you to avoid such websites and use only trusted and legal sources.


      One of the best options to watch Dunkirk (2017) in Hindi dubbed online for free is to use an HD online player that can stream the movie from various sources. An HD online player is a piece of software or a web application that allows you to watch movies and TV shows online without downloading them. It also lets you choose the quality, language, and subtitles of the video.


      One of the most popular and reliable HD online players is HD Online Player (Dunkirk (English) hindi 720p free do). This HD online player can stream Dunkirk (2017) in Hindi dubbed online for free from various sources, such as YouTube, Dailymotion, Vimeo, etc. It also offers high-quality video and audio, as well as fast loading speed and smooth playback.


      How to Use HD Online Player (Dunkirk (English) hindi 720p free do)


      To use HD Online Player (Dunkirk (English) hindi 720p free do), you need to follow these simple steps:

      1. Go to the official website of HD Online Player (Dunkirk (English) hindi 720p free do) by clicking here.[^2^]
      2. On the homepage, you will see a search box where you can enter the name of the movie you want to watch. Type "Dunkirk" and click on the search button.
      3. You will see a list of results that match your query. Choose the one that says "Dunkirk (2017) Full Movie in Hindi Dubbed".
      4. You will be redirected to a page where you can watch the movie online. You can also adjust the quality, language, and subtitles of the video according to your preference.
      5. Enjoy watching Dunkirk (2017) in Hindi dubbed online for free with HD Online Player (Dunkirk (English) hindi 720p free do).

      Conclusion


      Dunkirk is a masterpiece of war cinema that deserves to be watched by everyone who appreciates history and filmmaking. If you want to watch it in the Hindi dubbed version online for free, you can use HD Online Player (Dunkirk (English) hindi 720p free do), which is a safe and legal way to stream the movie from various sources. We hope this guide was helpful and informative for you. If you have any questions or feedback, please let us know in the comments below.


      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Malibu Anderson Paak Download 24.md b/spaces/diacanFperku/AutoGPT/Malibu Anderson Paak Download 24.md deleted file mode 100644 index 1eb555b3d7ee4c67650bacc1f53077d9b6b3b047..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Malibu Anderson Paak Download 24.md +++ /dev/null @@ -1,28 +0,0 @@ - -

      How to Download Malibu by Anderson .Paak for Free


      Malibu is the second studio album by American singer and rapper Anderson .Paak, released on January 15, 2016. The album features guest appearances from ScHoolboy Q, BJ the Chicago Kid, The Game, Talib Kweli, Rapsody and more. It received critical acclaim and was nominated for a Grammy Award for Best Urban Contemporary Album.


      If you are a fan of Anderson .Paak's soulful and eclectic style of music, you might be wondering how to download Malibu for free. Here are some ways you can do that:


      Malibu Anderson Paak Download 24


      Download File >>> https://gohhs.com/2uFVtG



      • One option is to visit the Internet Archive website[^1^], where you can stream or download the entire album in MP3 format. You can also find other albums by Anderson .Paak on this site.
      • Another option is to visit the NewAlbumReleases.net website[^2^], where you can download the album in MP3 format using Rapidgator or Hitfile links. However, be careful of pop-up ads and malware on this site.
      • A third option is to listen to the TBA podcast episode[^3^] where they review Malibu and share their opinions on each track. You can stream or download the podcast from the Internet Archive website as well.
      • A fourth option is to use SoundCloud[^4^], where you can find some tracks from Malibu uploaded by other users. However, you might not be able to find the full album or download the tracks without a SoundCloud Go subscription.

      These are some of the ways you can download Malibu by Anderson .Paak for free. However, if you enjoy his music and want to support him, you can also buy the album from iTunes, Amazon, Google Play or other online platforms.

      Malibu is not only a great album by Anderson .Paak, but also a reflection of his personal and musical journey. Born and raised in Oxnard, California, Anderson .Paak had a turbulent childhood and struggled with homelessness and addiction. He started making music as a teenager and worked as a drummer, producer and vocalist for various artists. He gained recognition after being featured on Dr. Dre's album Compton in 2015.

      -

      Malibu is inspired by Anderson .Paak's experiences of living in Malibu, California, where he moved after signing with Dr. Dre's label Aftermath Entertainment. The album showcases his versatility and creativity as an artist, blending elements of hip hop, R&B, soul, funk, jazz and rock. The album also explores themes of love, family, faith, identity and resilience.

      -

      Some of the highlights of the album include:

      -

      -
      • The Bird: The opening track of the album, where Anderson .Paak sings about his family background and his gratitude for his parents and grandparents.
      • Heart Don't Stand a Chance: A smooth and groovy track where Anderson .Paak flirts with a woman and tries to win her over.
      • The Waters: A soulful and introspective track where Anderson .Paak reflects on his struggles and achievements in the music industry, featuring BJ the Chicago Kid on the chorus.
      • The Season/Carry Me: A two-part track where Anderson .Paak switches from rapping to singing, narrating his life story from his childhood to his present success.
      • Am I Wrong: A funky and upbeat track where Anderson .Paak questions his relationship with a woman and invites her to dance with him, featuring ScHoolboy Q on the verse.
      • Without You: A catchy and emotional track where Anderson .Paak expresses his regret and remorse for hurting a woman he loves, featuring Rapsody on the verse.
      • Come Down: An energetic and infectious track where Anderson .Paak boasts about his skills and charisma, urging everyone to join him on the dance floor.
      • The Dreamer: The closing track of the album, where Anderson .Paak celebrates his dreams and aspirations, encouraging others to do the same, featuring Talib Kweli and Timan Family Choir on the chorus.

      Malibu is a masterpiece by Anderson .Paak that showcases his talent and personality as an artist. It is an album that you can listen to over and over again, discovering new details and meanings each time. If you haven't listened to it yet, you should definitely give it a try.

      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Ni.no.Kuni.II.Revenant.Kingdom-CODEX Cheat Engine HOT.md b/spaces/diacanFperku/AutoGPT/Ni.no.Kuni.II.Revenant.Kingdom-CODEX Cheat Engine HOT.md deleted file mode 100644 index 6faec59568f69cd81fce3285163d4f16e339a599..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Ni.no.Kuni.II.Revenant.Kingdom-CODEX Cheat Engine HOT.md +++ /dev/null @@ -1,38 +0,0 @@ -

      Ni.no.Kuni.II.Revenant.Kingdom-CODEX cheat engine


      Download Zip: https://gohhs.com/2uFUaF



      - -As for the pre-order bonuses, don't expect that in the final version. I'm going to have it in for the basic edition and leave the first DLC and special edition alone. Sorry for the lack of communication about this. Hopefully I'll get the second or DLC out in the future. - -- Changed: - -- The magic meter is less forgiving now - -- Enchanted equipment is affected by this system now - -The above changes make it so that you'll need to have a high magic score to use magic, which is nice. I noticed that my magic score went down by a few points when using the enchanted equipment, which is good. - -My final thoughts about this: it's a huge improvement and I'm excited to see how much the game will change over time. I do still have a few things I'd like to change, but at the same time the amount of progress this game has made in only three months is pretty amazing. I'd love to see what the developers can do in a year. I've seen some people ask if they could just wait for a full year and get all the updates. They're totally free to do that and I wish them well, but at the same time this game would be much more polished in a year than it is now. - -My only caveat for this is that it's not perfect, but the best way I know to describe it is that it doesn't feel like a full release. It feels like a demo or a beta. This is good, but it means that you might be able to find something that hasn't been implemented yet. I did this in the early game when I entered the unicorn side quest, but I didn't encounter anything before I finished the map. - -Thank you all for your support. I'm planning to do another walkthrough after the upcoming patches come out. - -Credits - -- Screenshot is a picture from our game room in D.C. - -- The map on the top is a screenshot from the early game - -Super Smash Bros. Mii Ver. Special Edition: - -Moderator - -General Manager - -Ragnarok Studios - -Check out the update thread for Ni No Kuni 2: Here - -Ni No Kuni 2: https 4fefd39f24

      diff --git a/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/mel_processing.py b/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/mel_processing.py deleted file mode 100644 index 50435ecf88ef4fb6c1d47f3e6edd04c3ea7d3e80..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = 
torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/digitalxingtong/Eileen-Bert-Vits2/monotonic_align/__init__.py b/spaces/digitalxingtong/Eileen-Bert-Vits2/monotonic_align/__init__.py deleted file mode 100644 index a323673bb16070d6d0fffddb939b657d0915ff1b..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Eileen-Bert-Vits2/monotonic_align/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - - -def maximum_path(neg_cent, mask): - """ numba optimized version. - neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) \ No newline at end of file diff --git a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/scnet_roi_head.py b/spaces/dineshreddy/WALT/mmdet/models/roi_heads/scnet_roi_head.py deleted file mode 100644 index 85aaa2f0600afbdfc8b0917cb5f341740776a603..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/scnet_roi_head.py +++ /dev/null @@ -1,582 +0,0 @@ -import torch -import torch.nn.functional as F - -from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, - merge_aug_masks, multiclass_nms) -from ..builder import HEADS, build_head, build_roi_extractor -from .cascade_roi_head import CascadeRoIHead - - -@HEADS.register_module() -class SCNetRoIHead(CascadeRoIHead): - """RoIHead for `SCNet `_. - - Args: - num_stages (int): number of cascade stages. - stage_loss_weights (list): loss weight of cascade stages. - semantic_roi_extractor (dict): config to init semantic roi extractor. - semantic_head (dict): config to init semantic head. - feat_relay_head (dict): config to init feature_relay_head. - glbctx_head (dict): config to init global context head. 
- """ - - def __init__(self, - num_stages, - stage_loss_weights, - semantic_roi_extractor=None, - semantic_head=None, - feat_relay_head=None, - glbctx_head=None, - **kwargs): - super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights, - **kwargs) - assert self.with_bbox and self.with_mask - assert not self.with_shared_head # shared head is not supported - - if semantic_head is not None: - self.semantic_roi_extractor = build_roi_extractor( - semantic_roi_extractor) - self.semantic_head = build_head(semantic_head) - - if feat_relay_head is not None: - self.feat_relay_head = build_head(feat_relay_head) - - if glbctx_head is not None: - self.glbctx_head = build_head(glbctx_head) - - def init_mask_head(self, mask_roi_extractor, mask_head): - """Initialize ``mask_head``""" - if mask_roi_extractor is not None: - self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) - self.mask_head = build_head(mask_head) - - def init_weights(self, pretrained): - """Initialize the weights in head. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - for i in range(self.num_stages): - if self.with_bbox: - self.bbox_roi_extractor[i].init_weights() - self.bbox_head[i].init_weights() - if self.with_mask: - self.mask_roi_extractor.init_weights() - self.mask_head.init_weights() - if self.with_semantic: - self.semantic_head.init_weights() - if self.with_glbctx: - self.glbctx_head.init_weights() - if self.with_feat_relay: - self.feat_relay_head.init_weights() - - @property - def with_semantic(self): - """bool: whether the head has semantic head""" - return hasattr(self, - 'semantic_head') and self.semantic_head is not None - - @property - def with_feat_relay(self): - """bool: whether the head has feature relay head""" - return (hasattr(self, 'feat_relay_head') - and self.feat_relay_head is not None) - - @property - def with_glbctx(self): - """bool: whether the head has global context head""" - return hasattr(self, 'glbctx_head') and self.glbctx_head is not None - - def _fuse_glbctx(self, roi_feats, glbctx_feat, rois): - """Fuse global context feats with roi feats.""" - assert roi_feats.size(0) == rois.size(0) - img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long() - fused_feats = torch.zeros_like(roi_feats) - for img_id in img_inds: - inds = (rois[:, 0] == img_id.item()) - fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id] - return fused_feats - - def _slice_pos_feats(self, feats, sampling_results): - """Get features from pos rois.""" - num_rois = [res.bboxes.size(0) for res in sampling_results] - num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results] - inds = torch.zeros(sum(num_rois), dtype=torch.bool) - start = 0 - for i in range(len(num_rois)): - start = 0 if i == 0 else start + num_rois[i - 1] - stop = start + num_pos_rois[i] - inds[start:stop] = 1 - sliced_feats = feats[inds] - return sliced_feats - - def _bbox_forward(self, - stage, - x, - rois, - semantic_feat=None, - glbctx_feat=None): - """Box head forward function used in both training and testing.""" - bbox_roi_extractor = self.bbox_roi_extractor[stage] - bbox_head = self.bbox_head[stage] - bbox_feats = bbox_roi_extractor( - x[:len(bbox_roi_extractor.featmap_strides)], rois) - if self.with_semantic and semantic_feat is not None: - bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], - rois) - if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: - bbox_semantic_feat = F.adaptive_avg_pool2d( - bbox_semantic_feat, bbox_feats.shape[-2:]) - bbox_feats += 
bbox_semantic_feat - if self.with_glbctx and glbctx_feat is not None: - bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois) - cls_score, bbox_pred, relayed_feat = bbox_head( - bbox_feats, return_shared_feat=True) - - bbox_results = dict( - cls_score=cls_score, - bbox_pred=bbox_pred, - relayed_feat=relayed_feat) - return bbox_results - - def _mask_forward(self, - x, - rois, - semantic_feat=None, - glbctx_feat=None, - relayed_feat=None): - """Mask head forward function used in both training and testing.""" - mask_feats = self.mask_roi_extractor( - x[:self.mask_roi_extractor.num_inputs], rois) - if self.with_semantic and semantic_feat is not None: - mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], - rois) - if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: - mask_semantic_feat = F.adaptive_avg_pool2d( - mask_semantic_feat, mask_feats.shape[-2:]) - mask_feats += mask_semantic_feat - if self.with_glbctx and glbctx_feat is not None: - mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois) - if self.with_feat_relay and relayed_feat is not None: - mask_feats = mask_feats + relayed_feat - mask_pred = self.mask_head(mask_feats) - mask_results = dict(mask_pred=mask_pred) - - return mask_results - - def _bbox_forward_train(self, - stage, - x, - sampling_results, - gt_bboxes, - gt_labels, - rcnn_train_cfg, - semantic_feat=None, - glbctx_feat=None): - """Run forward function and calculate loss for box head in training.""" - bbox_head = self.bbox_head[stage] - rois = bbox2roi([res.bboxes for res in sampling_results]) - bbox_results = self._bbox_forward( - stage, - x, - rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - - bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes, - gt_labels, rcnn_train_cfg) - loss_bbox = bbox_head.loss(bbox_results['cls_score'], - bbox_results['bbox_pred'], rois, - *bbox_targets) - - bbox_results.update( - loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) - return bbox_results - - def _mask_forward_train(self, - x, - sampling_results, - gt_masks, - rcnn_train_cfg, - semantic_feat=None, - glbctx_feat=None, - relayed_feat=None): - """Run forward function and calculate loss for mask head in - training.""" - pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - mask_results = self._mask_forward( - x, - pos_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat, - relayed_feat=relayed_feat) - - mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, - rcnn_train_cfg) - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - loss_mask = self.mask_head.loss(mask_results['mask_pred'], - mask_targets, pos_labels) - - mask_results = loss_mask - return mask_results - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - gt_semantic_seg=None): - """ - Args: - x (list[Tensor]): list of multi-level img features. - - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - - proposal_list (list[Tensors]): list of region proposals. - - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
- - gt_labels (list[Tensor]): class indices corresponding to each box - - gt_bboxes_ignore (None, list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - gt_masks (None, Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - - gt_semantic_seg (None, list[Tensor]): semantic segmentation masks - used if the architecture supports semantic segmentation task. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - losses = dict() - - # semantic segmentation branch - if self.with_semantic: - semantic_pred, semantic_feat = self.semantic_head(x) - loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) - losses['loss_semantic_seg'] = loss_seg - else: - semantic_feat = None - - # global context branch - if self.with_glbctx: - mc_pred, glbctx_feat = self.glbctx_head(x) - loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels) - losses['loss_glbctx'] = loss_glbctx - else: - glbctx_feat = None - - for i in range(self.num_stages): - self.current_stage = i - rcnn_train_cfg = self.train_cfg[i] - lw = self.stage_loss_weights[i] - - # assign gts and sample proposals - sampling_results = [] - bbox_assigner = self.bbox_assigner[i] - bbox_sampler = self.bbox_sampler[i] - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - - for j in range(num_imgs): - assign_result = bbox_assigner.assign(proposal_list[j], - gt_bboxes[j], - gt_bboxes_ignore[j], - gt_labels[j]) - sampling_result = bbox_sampler.sample( - assign_result, - proposal_list[j], - gt_bboxes[j], - gt_labels[j], - feats=[lvl_feat[j][None] for lvl_feat in x]) - sampling_results.append(sampling_result) - - bbox_results = \ - self._bbox_forward_train( - i, x, sampling_results, gt_bboxes, gt_labels, - rcnn_train_cfg, semantic_feat, glbctx_feat) - roi_labels = bbox_results['bbox_targets'][0] - - for name, value in bbox_results['loss_bbox'].items(): - losses[f's{i}.{name}'] = ( - value * lw if 'loss' in name else value) - - # refine boxes - if i < self.num_stages - 1: - pos_is_gts = [res.pos_is_gt for res in sampling_results] - with torch.no_grad(): - proposal_list = self.bbox_head[i].refine_bboxes( - bbox_results['rois'], roi_labels, - bbox_results['bbox_pred'], pos_is_gts, img_metas) - - if self.with_feat_relay: - relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'], - sampling_results) - relayed_feat = self.feat_relay_head(relayed_feat) - else: - relayed_feat = None - - mask_results = self._mask_forward_train(x, sampling_results, gt_masks, - rcnn_train_cfg, semantic_feat, - glbctx_feat, relayed_feat) - mask_lw = sum(self.stage_loss_weights) - losses['loss_mask'] = mask_lw * mask_results['loss_mask'] - - return losses - - def simple_test(self, x, proposal_list, img_metas, rescale=False): - """Test without augmentation.""" - if self.with_semantic: - _, semantic_feat = self.semantic_head(x) - else: - semantic_feat = None - - if self.with_glbctx: - mc_pred, glbctx_feat = self.glbctx_head(x) - else: - glbctx_feat = None - - num_imgs = len(proposal_list) - img_shapes = tuple(meta['img_shape'] for meta in img_metas) - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - # "ms" in variable names means multi-stage - ms_scores = [] - rcnn_test_cfg = self.test_cfg - - rois = bbox2roi(proposal_list) - for i in range(self.num_stages): - bbox_head = self.bbox_head[i] - bbox_results = self._bbox_forward( - i, - 
x, - rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - # split batch bbox prediction back to each image - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - num_proposals_per_img = tuple(len(p) for p in proposal_list) - rois = rois.split(num_proposals_per_img, 0) - cls_score = cls_score.split(num_proposals_per_img, 0) - bbox_pred = bbox_pred.split(num_proposals_per_img, 0) - ms_scores.append(cls_score) - - if i < self.num_stages - 1: - bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score] - rois = torch.cat([ - bbox_head.regress_by_class(rois[i], bbox_label[i], - bbox_pred[i], img_metas[i]) - for i in range(num_imgs) - ]) - - # average scores of each image by stages - cls_score = [ - sum([score[i] for score in ms_scores]) / float(len(ms_scores)) - for i in range(num_imgs) - ] - - # apply bbox post-processing to each image individually - det_bboxes = [] - det_labels = [] - for i in range(num_imgs): - det_bbox, det_label = self.bbox_head[-1].get_bboxes( - rois[i], - cls_score[i], - bbox_pred[i], - img_shapes[i], - scale_factors[i], - rescale=rescale, - cfg=rcnn_test_cfg) - det_bboxes.append(det_bbox) - det_labels.append(det_label) - det_bbox_results = [ - bbox2result(det_bboxes[i], det_labels[i], - self.bbox_head[-1].num_classes) - for i in range(num_imgs) - ] - - if self.with_mask: - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - mask_classes = self.mask_head.num_classes - det_segm_results = [[[] for _ in range(mask_classes)] - for _ in range(num_imgs)] - else: - if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i] - for i in range(num_imgs) - ] - mask_rois = bbox2roi(_bboxes) - - # get relay feature on mask_rois - bbox_results = self._bbox_forward( - -1, - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - relayed_feat = bbox_results['relayed_feat'] - relayed_feat = self.feat_relay_head(relayed_feat) - - mask_results = self._mask_forward( - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat, - relayed_feat=relayed_feat) - mask_pred = mask_results['mask_pred'] - - # split batch mask prediction back to each image - num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) - mask_preds = mask_pred.split(num_bbox_per_img, 0) - - # apply mask post-processing to each image individually - det_segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - det_segm_results.append( - [[] for _ in range(self.mask_head.num_classes)]) - else: - segm_result = self.mask_head.get_seg_masks( - mask_preds[i], _bboxes[i], det_labels[i], - self.test_cfg, ori_shapes[i], scale_factors[i], - rescale) - det_segm_results.append(segm_result) - - # return results - if self.with_mask: - return list(zip(det_bbox_results, det_segm_results)) - else: - return det_bbox_results - - def aug_test(self, img_feats, proposal_list, img_metas, rescale=False): - if self.with_semantic: - semantic_feats = [ - self.semantic_head(feat)[1] for feat in img_feats - ] - else: - semantic_feats = [None] * len(img_metas) - - if self.with_glbctx: - glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats] - else: - glbctx_feats = [None] * len(img_metas) - - rcnn_test_cfg = self.test_cfg - aug_bboxes = [] - aug_scores = [] - for x, img_meta, semantic_feat, glbctx_feat in zip( - img_feats, img_metas, 
semantic_feats, glbctx_feats): - # only one image in the batch - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - - proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, - scale_factor, flip) - # "ms" in variable names means multi-stage - ms_scores = [] - - rois = bbox2roi([proposals]) - for i in range(self.num_stages): - bbox_head = self.bbox_head[i] - bbox_results = self._bbox_forward( - i, - x, - rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - ms_scores.append(bbox_results['cls_score']) - if i < self.num_stages - 1: - bbox_label = bbox_results['cls_score'].argmax(dim=1) - rois = bbox_head.regress_by_class( - rois, bbox_label, bbox_results['bbox_pred'], - img_meta[0]) - - cls_score = sum(ms_scores) / float(len(ms_scores)) - bboxes, scores = self.bbox_head[-1].get_bboxes( - rois, - cls_score, - bbox_results['bbox_pred'], - img_shape, - scale_factor, - rescale=False, - cfg=None) - aug_bboxes.append(bboxes) - aug_scores.append(scores) - - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) - det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img) - - det_bbox_results = bbox2result(det_bboxes, det_labels, - self.bbox_head[-1].num_classes) - - if self.with_mask: - if det_bboxes.shape[0] == 0: - det_segm_results = [[] - for _ in range(self.mask_head.num_classes)] - else: - aug_masks = [] - for x, img_meta, semantic_feat, glbctx_feat in zip( - img_feats, img_metas, semantic_feats, glbctx_feats): - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip) - mask_rois = bbox2roi([_bboxes]) - # get relay feature on mask_rois - bbox_results = self._bbox_forward( - -1, - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - relayed_feat = bbox_results['relayed_feat'] - relayed_feat = self.feat_relay_head(relayed_feat) - mask_results = self._mask_forward( - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat, - relayed_feat=relayed_feat) - mask_pred = mask_results['mask_pred'] - aug_masks.append(mask_pred.sigmoid().cpu().numpy()) - merged_masks = merge_aug_masks(aug_masks, img_metas, - self.test_cfg) - ori_shape = img_metas[0][0]['ori_shape'] - det_segm_results = self.mask_head.get_seg_masks( - merged_masks, - det_bboxes, - det_labels, - rcnn_test_cfg, - ori_shape, - scale_factor=1.0, - rescale=False) - return [(det_bbox_results, det_segm_results)] - else: - return [det_bbox_results] diff --git a/spaces/dmeck/RVC-Speakers/vits/modules/attentions/__init__.py b/spaces/dmeck/RVC-Speakers/vits/modules/attentions/__init__.py deleted file mode 100644 index 651ca58b2bcd864167a5199f06bf70373c15dc2c..0000000000000000000000000000000000000000 --- a/spaces/dmeck/RVC-Speakers/vits/modules/attentions/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from vits.modules.attentions.attentions import * diff --git a/spaces/dylanebert/list-of-splats/style.css b/spaces/dylanebert/list-of-splats/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/dylanebert/list-of-splats/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, 
"Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/emanlapponi/sound-refukculator/app.py b/spaces/emanlapponi/sound-refukculator/app.py deleted file mode 100644 index f97ace4872ed60bd1fa212653ac6146513b83f73..0000000000000000000000000000000000000000 --- a/spaces/emanlapponi/sound-refukculator/app.py +++ /dev/null @@ -1,116 +0,0 @@ -import io - -import numpy as np -import pedalboard as pb -import pydub -import soundfile as sf -import streamlit as st -from audio_recorder_streamlit import audio_recorder - - -def stretch(audio, factor): - buffer = [] - for s in audio: - for _ in range(factor): - buffer.append(s) - return np.array(buffer) - - -st.title("👹 Sound Refukculator") - -audio = np.array([]) - -source = st.radio("Choose source", ["Microphone", "File upload"]) -if source == "Microphone": - audio_bytes = audio_recorder() - if audio_bytes: - audio, sr = sf.read(io.BytesIO(audio_bytes)) -else: - audio_bytes = st.file_uploader("Upload file") - if audio_bytes: - if audio_bytes.name.endswith("mp3"): - audio = pydub.AudioSegment.from_mp3(audio_bytes) - sr = audio.frame_rate - audio = np.float32(audio.get_array_of_samples()) / 2**15 - else: - audio, sr = sf.read(audio_bytes) - - -if audio.any(): - st.write("Original sound:") - st.audio(audio.T, sample_rate=sr) - cols = st.columns(5) - chunk_dividend = cols[0].slider("🫀 Chunkizer", 16, 128, step=16) - prob = cols[1].slider("🤡 Impredictidiblize", 0, 100, 0) - stretch_factor = cols[2].slider("🧌 Trollizer", 0, 10, 0) - reps = cols[3].slider("🫣 Repetizer", 0, 1, 0) - rev_size = cols[4].slider("🚛 Hugermaker", 0.0, 0.09, 0.0) - delay_macro = cols[0].slider("🪐 Spaceometer", 0.01, 0.09, 0.0) - rounder = cols[1].slider("🤔 Rounder", 0, 100, 0) - pshift = cols[2].slider("😱 Tonermaker", 0, 1, 0) - reverse = cols[3].slider("🧶 Revolver", 0, 1, 0) - sr_factor = cols[4].slider("🏃‍♀️ Chirpidize", 1, 16, 1) - delay = pb.Pedalboard( - [pb.Delay(delay_seconds=delay_macro, feedback=delay_macro * 5)] - ) - reverb = pb.Pedalboard([pb.Reverb(room_size=rev_size)]) - chorus = pb.Pedalboard([pb.Chorus(rate_hz=rounder)]) - pshifter7 = pb.Pedalboard([pb.PitchShift(-7)]) - pshifter3 = pb.Pedalboard([pb.PitchShift(-3)]) - pshifter5 = pb.Pedalboard([pb.PitchShift(-5)]) - pshifter12 = pb.Pedalboard([pb.PitchShift(-12)]) - processed = [] - chunk_dividend = sorted(np.random.randint(audio.shape[0], size=chunk_dividend)) - for i, chunk in enumerate(np.array_split(audio, chunk_dividend)): - chunk = ( - stretch(chunk, stretch_factor) - if np.random.randint(100) < prob and stretch_factor - else chunk - ) - chunk = ( - reverb(chunk, sample_rate=sr, reset=False) - if np.random.randint(100) < prob and rev_size - else chunk - ) - chunk = ( - delay(chunk, sample_rate=sr, reset=False) - if np.random.randint(100) < prob and delay_macro - else chunk - ) - chunk = ( - chorus(chunk, sample_rate=sr, reset=False) - if np.random.randint(100) < prob and rounder - else chunk - ) - chunk = ( - pshifter7(chunk, sample_rate=sr, reset=True) - if np.random.randint(100) < prob and pshift - else chunk - ) - chunk = ( - pshifter3(chunk, sample_rate=sr, reset=True) - if np.random.randint(100) < prob and pshift - else chunk - ) - chunk = ( - pshifter5(chunk, sample_rate=sr, reset=True) - if 
np.random.randint(100) < prob and pshift - else chunk - ) - chunk = ( - pshifter12(chunk, sample_rate=sr, reset=True) - if np.random.randint(100) < prob and pshift - else chunk - ) - chunk = np.concatenate([chunk, chunk]) if np.random.randint(100) < prob and reps else chunk - chunk = np.flip(chunk) if np.random.randint(100) < prob and reverse else chunk - processed += [s for s in chunk] - - processed = np.array(processed) - reverb = pb.Pedalboard([pb.Reverb(room_size=0.3)]) - # processed = reverb(processed, sr, reset=False) - compressor = pb.Pedalboard([pb.Limiter(threshold_db=-1)]) - processed = compressor(processed, sr, reset=False) - - st.write("Refukculized sound:") - st.audio(processed.T, sample_rate=sr * sr_factor) diff --git a/spaces/emc348/faces-through-time/models/StyleCLIP/models/facial_recognition/model_irse.py b/spaces/emc348/faces-through-time/models/StyleCLIP/models/facial_recognition/model_irse.py deleted file mode 100644 index b1c79e0366e4a6fd92011e86df80f8b31ec671ae..0000000000000000000000000000000000000000 --- a/spaces/emc348/faces-through-time/models/StyleCLIP/models/facial_recognition/model_irse.py +++ /dev/null @@ -1,84 +0,0 @@ -from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module -from models.facial_recognition.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm - -""" -Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) -""" - - -class Backbone(Module): - def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True): - super(Backbone, self).__init__() - assert input_size in [112, 224], "input_size should be 112 or 224" - assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152" - assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se" - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - if input_size == 112: - self.output_layer = Sequential(BatchNorm2d(512), - Dropout(drop_ratio), - Flatten(), - Linear(512 * 7 * 7, 512), - BatchNorm1d(512, affine=affine)) - else: - self.output_layer = Sequential(BatchNorm2d(512), - Dropout(drop_ratio), - Flatten(), - Linear(512 * 14 * 14, 512), - BatchNorm1d(512, affine=affine)) - - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - def forward(self, x): - x = self.input_layer(x) - x = self.body(x) - x = self.output_layer(x) - return l2_norm(x) - - -def IR_50(input_size): - """Constructs a ir-50 model.""" - model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_101(input_size): - """Constructs a ir-101 model.""" - model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_152(input_size): - """Constructs a ir-152 model.""" - model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_50(input_size): - """Constructs a ir_se-50 model.""" - model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_101(input_size): - """Constructs a ir_se-101 model.""" - model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, 
affine=False) - return model - - -def IR_SE_152(input_size): - """Constructs a ir_se-152 model.""" - model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False) - return model diff --git a/spaces/epexVfeibi/Imagedeblurr/100 Years Telugu Panchangam Pdf Download EXCLUSIVEl.md b/spaces/epexVfeibi/Imagedeblurr/100 Years Telugu Panchangam Pdf Download EXCLUSIVEl.md deleted file mode 100644 index 80028ef21d4b5bb1bf562b9bcf5721541e6ada5b..0000000000000000000000000000000000000000 --- a/spaces/epexVfeibi/Imagedeblurr/100 Years Telugu Panchangam Pdf Download EXCLUSIVEl.md +++ /dev/null @@ -1,6 +0,0 @@ -

      100 Years Telugu Panchangam Pdf Downloadl


      Download Ziphttps://jinyurl.com/2uEqYZ



      -
      -Extreme.rar PDF-XChange Editor Plus 27.0.324.1 + Crack AUTODATA 5.45 + Crack ... 720.mp4 ... Kako Raditi One Stvari Pdf Downloadl >>> DOWNLOAD. 1fdad05405
      -
      -
      -

      diff --git a/spaces/epexVfeibi/Imagedeblurr/Adobe Captivate 7 Full Keygen [UPDATED].md b/spaces/epexVfeibi/Imagedeblurr/Adobe Captivate 7 Full Keygen [UPDATED].md deleted file mode 100644 index 84964d81e144f219899daa5b1bc51099925939e8..0000000000000000000000000000000000000000 --- a/spaces/epexVfeibi/Imagedeblurr/Adobe Captivate 7 Full Keygen [UPDATED].md +++ /dev/null @@ -1,10 +0,0 @@ -

      Adobe Captivate 7 Full Keygen


      Download >>> https://jinyurl.com/2uEqSg



      - -Adobe Captivate 2019 Crack is a word processor that is a feature-rich video editor that is - -Adobe Captivate 2019 11.5.5.553 Crack + Serial Key Updated. Windows 10 64 bit, Windows 10, Windows 8 64 bit, Windows 8, Windows 7 64 bit, Windows 7. Adobe Captivate 2019 Crack is a word processor that is a feature-rich video editor that is developed for the Premier creative cloud and works on both Mac and Windows platforms. It has a full feature set and a streamlined user interface that keeps the familiar interface for creating and editing. It has an easy way to share your designs with your audience, a robust editing tool, and an intuitive and simple editing system. Adobe Captivate 2019 Crack can open files, preview, export, and even edit different file formats including MP4, AVI, and more. It supports professional features and easy to use. Adobe Captivate 2019 Full Version Support OS: Windows 7/8/8.1/10. (64-bit only). System Requirements: - -Adobe Captivate 2019 11.5.5.553 Crack + Serial Key Updated. Windows 10 64 bit, Windows 10, Windows 8 64 bit, Windows 8, Windows 7 64 bit, Windows 7. Adobe Captivate 2019 Crack is a word processor that is a feature-rich video editor that is developed for the Premier creative cloud and works on both Mac and Windows platforms. It has a full feature set and a streamlined user interface that keeps the familiar interface for creating and editing 4fefd39f24
      -
      -
      -

      diff --git a/spaces/etahamad/new-plant-disease-detection/README.md b/spaces/etahamad/new-plant-disease-detection/README.md deleted file mode 100644 index fb1ecef1f714eeb0b8fbda95bbfcb7f2d56beb7c..0000000000000000000000000000000000000000 --- a/spaces/etahamad/new-plant-disease-detection/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: New Plant Disease Detection -emoji: 👀 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/eunjae/LoRA-DreamBooth-Training-UI/utils.py b/spaces/eunjae/LoRA-DreamBooth-Training-UI/utils.py deleted file mode 100644 index 8fe82394db3a576d0b8bb94788cdc313a1b44392..0000000000000000000000000000000000000000 --- a/spaces/eunjae/LoRA-DreamBooth-Training-UI/utils.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import annotations - -import pathlib - - -def find_exp_dirs(ignore_repo: bool = False) -> list[str]: - repo_dir = pathlib.Path(__file__).parent - exp_root_dir = repo_dir / 'experiments' - if not exp_root_dir.exists(): - return [] - exp_dirs = sorted(exp_root_dir.glob('*')) - exp_dirs = [ - exp_dir for exp_dir in exp_dirs - if (exp_dir / 'pytorch_lora_weights.bin').exists() - ] - if ignore_repo: - exp_dirs = [ - exp_dir for exp_dir in exp_dirs if not (exp_dir / '.git').exists() - ] - return [path.relative_to(repo_dir).as_posix() for path in exp_dirs] - - -def save_model_card( - save_dir: pathlib.Path, - base_model: str, - instance_prompt: str, - test_prompt: str = '', - test_image_dir: str = '', -) -> None: - image_str = '' - if test_prompt and test_image_dir: - image_paths = sorted((save_dir / test_image_dir).glob('*')) - if image_paths: - image_str = f'Test prompt: {test_prompt}\n' - for image_path in image_paths: - rel_path = image_path.relative_to(save_dir) - image_str += f'![{image_path.stem}]({rel_path})\n' - - model_card = f'''--- -license: creativeml-openrail-m -base_model: {base_model} -instance_prompt: {instance_prompt} -tags: -- stable-diffusion -- stable-diffusion-diffusers -- text-to-image -- diffusers -- lora -inference: true ---- -# LoRA DreamBooth - {save_dir.name} - -These are LoRA adaption weights for [{base_model}](https://huggingface.co/{base_model}). The weights were trained on the instance prompt "{instance_prompt}" using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. 
- -{image_str} -''' - - with open(save_dir / 'README.md', 'w') as f: - f.write(model_card) diff --git a/spaces/fakezeta/pdfchat/app.py b/spaces/fakezeta/pdfchat/app.py deleted file mode 100644 index 87612f40dababcc3294c3daa0f8ac08f367112e2..0000000000000000000000000000000000000000 --- a/spaces/fakezeta/pdfchat/app.py +++ /dev/null @@ -1,110 +0,0 @@ -import streamlit as st -from streamlit_chat import message -from ingest_data import embed_doc -from query_data import get_chain -import os -import time - -st.set_page_config(page_title="LangChain Local PDF Chat", page_icon=":robot:") - -footer=""" - -""" -st.markdown(footer,unsafe_allow_html=True) - -def process_file(uploaded_file): - with open(uploaded_file.name,"wb") as f: - f.write(uploaded_file.getbuffer()) - st.write("File Uploaded successfully") - - with st.spinner("Document is being vectorized...."): - vectorstore = embed_doc(uploaded_file.name) - f.close() - os.remove(uploaded_file.name) - return vectorstore - -def get_text(): - input_text = st.text_input("You: ", value="", key="input", disabled=st.session_state.disabled) - return input_text - -def query(query): - start = time.time() - with st.spinner("Doing magic...."): - if len(st.session_state.past) > 0 and len(st.session_state.generated) > 0: - chat_history=[("HUMAN: "+st.session_state.past[-1], "ASSISTANT: "+st.session_state.generated[-1])] - else: - chat_history=[] - print("chat_history:", chat_history) - output = st.session_state.chain.run(input= query, - question= query, - vectorstore= st.session_state.vectorstore, - chat_history= chat_history - ) - end = time.time() - print("Query time: \a "+str(round(end - start,1))) - return output - - -with open("style.css") as f: - st.markdown(''.format(f.read()), unsafe_allow_html=True) - -st.header("Local Chat with Pdf") - -if "uploaded_file_name" not in st.session_state: - st.session_state.uploaded_file_name = "" - -if "past" not in st.session_state: - st.session_state.past = [] - -if "generated" not in st.session_state: - st.session_state["generated"] = [] - -if "vectorstore" not in st.session_state: - st.session_state.vectorstore = None - -if "chain" not in st.session_state: - st.session_state.chain = None - -uploaded_file = st.file_uploader("Choose a file", type=['pdf']) - -if uploaded_file: - if uploaded_file.name != st.session_state.uploaded_file_name: - st.session_state.vectorstore = None - st.session_state.chain = None - st.session_state["generated"] = [] - st.session_state.past = [] - st.session_state.uploaded_file_name = uploaded_file.name - st.session_state.all_messages = [] - print(st.session_state.uploaded_file_name) - if not st.session_state.vectorstore: - st.session_state.vectorstore = process_file(uploaded_file) - - if st.session_state.vectorstore and not st.session_state.chain: - with st.spinner("Loading Large Language Model...."): - st.session_state.chain=get_chain(st.session_state.vectorstore) - searching=False - user_input = st.text_input("You: ", value="", key="input", disabled=searching) - send_button = st.button(label="Query") - if send_button: - searching = True - output = query(user_input) - searching = False - st.session_state.past.append(user_input) - st.session_state.generated.append(output) - if st.session_state["generated"]: - for i in range(len(st.session_state["generated"]) - 1, -1, -1): - message(st.session_state["generated"][i], key=str(i)) - message(st.session_state.past[i], is_user=True, key=str(i) + "_user") diff --git a/spaces/falterWliame/Face_Mask_Detection/Korg Pa Manager V21 37 [PORTABLE].md 
b/spaces/falterWliame/Face_Mask_Detection/Korg Pa Manager V21 37 [PORTABLE].md deleted file mode 100644 index 6310eca2e4d45cf1d69e5b8fd320fd757137841d..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Korg Pa Manager V21 37 [PORTABLE].md +++ /dev/null @@ -1,6 +0,0 @@ -

      Korg Pa Manager V21 37


      Download >>>>> https://urlca.com/2uDdQ9



      -
      -1 have a KORG Poly-61 Synthesizer and a Color Computer. ... FEATURES - FILE MANAGER 64 A utility program custom designed for ... 24 Display Header 25-37 Label Keys 38 Display Options 39 Define Notes 40 ... Send check to P.O. Box 210, Jenkintown, PA 19046. b Si b software RAINBOW CERTIFICATION SEM The ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/falterWliame/Face_Mask_Detection/Mamalateo Tax Reviewer Pdf Free.md b/spaces/falterWliame/Face_Mask_Detection/Mamalateo Tax Reviewer Pdf Free.md deleted file mode 100644 index 348ee7eaf2364c46271b448409d8cbbdff83f615..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Mamalateo Tax Reviewer Pdf Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Mamalateo Tax Reviewer Pdf Free


      Download File ○○○ https://urlca.com/2uDclL



      - -View kupdf.com_reviewer-on-taxation-mamalateo.pdf from BUS 200 at Mindanao-Mirawi State University. TAX REVIEW FROM: ATTY. VICTORINO C. MAMALATEO 1 ... 2 ...3 ...4 ...5 ...6 ...7 ...8 ...9 ...10 ...11 ...12 . ..13 ...14 ...15 ...16 ...17 ...18 ...19 ...20 ...21 ...22 ...23 ...24 ... 25 ...26 ...27 ...28 ...29 ...30 ...31 ...32 ...33 ...34 ...35 ...36 ...37 . ..38 ...39 ...40 ...41 ...42 ...43 ...44 ...45 ...46 ...47 ...48 ...49 ... 50 ...51 ...52 ...53 ...54 ...55 ...56 ...57 ...58 ...59 ...60 ...61 ...62 . ..63 ...64 ...65 ...66 ...67 ...68 ...69 ...70 ...71 ...72 ...73 ...74 ... 75 ...76 ...77 ...78 ...79 ...80 ...81 ...82 ...83 ...84 ...85 ...86 . 8a78ff9644
      -
      -
      -

      diff --git a/spaces/falterWliame/Face_Mask_Detection/OMSI2AddOnCitybusi280SeriesdownloadforpcTorrent.md b/spaces/falterWliame/Face_Mask_Detection/OMSI2AddOnCitybusi280SeriesdownloadforpcTorrent.md deleted file mode 100644 index 7118d4a3f03c5eeb7a98aacb8b2e1ca7e136efd8..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/OMSI2AddOnCitybusi280SeriesdownloadforpcTorrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

      OMSI2AddOnCitybusi280SeriesdownloadforpcTorrent


Download: https://urlca.com/2uDcVg



      -
      -OMSI 2: Steam Edition Free Download PC Game Cracked in Direct Link The ... Omsi Hamburg Crack Download Torrent; Omsi Bus 提供者 使用自訂式 ... 2017 · OMSI 2 – Addon Citybus i280 Series By Sebastien Friday, May ... 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/fatiXbelha/sd/Black Trap V2 The Ultimate Blox Fruits Script HackGUI.md b/spaces/fatiXbelha/sd/Black Trap V2 The Ultimate Blox Fruits Script HackGUI.md deleted file mode 100644 index 54976a00ada4eca394984bd65c882b9d79c1c551..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Black Trap V2 The Ultimate Blox Fruits Script HackGUI.md +++ /dev/null @@ -1,138 +0,0 @@ -
      -

      Black Trap Download Roblox: How to Get the Best Script Hack for Blox Fruits

      -

      If you are a fan of Blox Fruits, a popular Roblox game based on the anime One Piece, you might have heard of Black Trap. Black Trap is a script hack that allows you to do amazing things in the game, such as autofarm, bring chest, teleport, and kill players. In this article, we will show you how to download and install Black Trap, how to use its features and commands, and some tips and tricks for using it effectively. Let's get started!

      -

      black trap download roblox


      Download ——— https://urllie.com/2uNvcf



      -

      What is Black Trap and Why You Need It

      -

      Black Trap is a script hack for Blox Fruits, a popular Roblox game

      -

      Blox Fruits is a game where you can explore a vast open world, fight enemies, collect fruits, and become the strongest pirate or marine. The game is inspired by the anime One Piece, where people can eat devil fruits that give them special powers. There are many types of fruits in the game, such as Paramecia, Zoan, Logia, and Mythical.

      -

      Black Trap allows you to autofarm, bring chest, teleport, and kill players with ease

      -

      Black Trap is a script hack that gives you an unfair advantage over other players in Blox Fruits. With Black Trap, you can:

      -
        -
• Autofarm: Automatically farm money, experience, bounty, reputation, and fruits without doing anything.
• Bring chest: Bring any chest in the game to your location instantly.
• Teleport: Teleport to any island in the game with one click.
• Kill player: Kill any player in the game with one hit.
      -

These features can help you level up faster, get more fruits, complete quests more easily, and dominate the game.

      -

      black trap v2 blox fruits script
      -black trap script pastebin
      -black trap discord server
      -black trap autofarm bring chest
      -black trap teleports and kill player
      -black trap script hack gui
      -black trap raijin scripts
      -black trap blox fruits update 15
      -black trap script key
      -black trap script free download
      -black trap script for roblox 2023
      -black trap script showcase
      -black trap script review
      -black trap script tutorial
      -black trap script features
      -black trap script working
      -black trap script not working
      -black trap script virus
      -black trap script safe
      -black trap script legit
      -black trap script scam
      -black trap script support
      -black trap script feedback
      -black trap script alternatives
      -black trap script comparison
      -black trap script vs other scripts
      -black trap script best settings
      -black trap script how to use
      -black trap script how to install
      -black trap script how to get key
      -black trap script how to update
      -black trap script how to fix errors
      -black trap script how to bypass anti cheat
      -black trap script how to get banned
      -black trap script pros and cons
      -black trap script advantages and disadvantages
      -black trap script benefits and drawbacks
      -black trap script strengths and weaknesses
      -black trap script tips and tricks
      -black trap script hacks and cheats
      -black trap script glitches and bugs
      -black trap script codes and commands
      -black trap script fun and easy
      -black trap script fast and reliable
      -black trap script cheap and affordable
      -black trap script premium and exclusive
      -black trap script quality and performance
      -black trap script awesome and cool
      -black trap script amazing and incredible

      -

      Black Trap is easy to use and has a friendly GUI

      -

      Black Trap is not only powerful but also easy to use. It has a simple and friendly graphical user interface (GUI) that lets you access all its features and commands with ease. You can also customize the settings of Black Trap according to your preferences. For example, you can change the keybinds, the speed, the distance, and the mode of each feature.

      -

      How to Download and Install Black Trap

      -

      You need a script executor to run Black Trap

      -

      To use Black Trap, you need a script executor that can run Lua scripts in Roblox. A script executor is a program that injects code into Roblox and allows you to run scripts that modify the game. There are many script executors available online, such as Synapse X, KRNL, JJSploit, etc. However, some of them may be unsafe, outdated, or detected by Roblox. Therefore, you should always use a trusted and updated script executor to avoid any risks.

      -

      You can get Black Trap from the official website or from Pastebin

      -

      There are two ways to get Black Trap: from the official website or from Pastebin. The official website is the best source to get the latest version of Black Trap, as well as updates, news, and support. However, the official website may sometimes be down or inaccessible due to high traffic or maintenance. In that case, you can use Pastebin as an alternative source. Pastebin is a website where you can store and share text online. You can find the Black Trap script on Pastebin by searching for its name or by using this link: [https://pastebin.com/BlackTrap].

      -

      You need to enter a key to access Black Trap

      -

      Before you can use Black Trap, you need to enter a key that verifies that you are a human and not a bot. The key is a random string of letters and numbers that you need to copy and paste into the Black Trap GUI. You can get the key from the official website or from Pastebin. The key changes every day, so you need to get a new one every time you want to use Black Trap.

      -

      You need to follow the instructions on how to inject Black Trap into Roblox

      -

      After you have the script executor, the Black Trap script, and the key, you are ready to inject Black Trap into Roblox. To do that, you need to follow these steps:

      -
        -
1. Open your script executor and select Roblox as the target process.
2. Copy and paste the Black Trap script into the script executor.
3. Press the execute button and wait for the Black Trap GUI to appear.
4. Copy and paste the key into the Black Trap GUI and press OK.
5. Enjoy using Black Trap!
      -

      How to Use Black Trap Features and Commands

      -

      You can access the Black Trap menu by pressing F9

      -

      Once you have injected Black Trap into Roblox, you can access its menu by pressing F9 on your keyboard. The menu will show you all the features and commands of Black Trap, as well as their status, keybinds, and settings. You can also close or reopen the menu by pressing F9 again.

      -

      You can toggle the autofarm feature by pressing F1

      -

      The autofarm feature is one of the most useful features of Black Trap. It allows you to automatically farm money, experience, bounty, reputation, and fruits without doing anything. You can toggle the autofarm feature on or off by pressing F1 on your keyboard. When the autofarm feature is on, you will see a green text saying "Autofarm ON" on the top left corner of your screen. When it is off, you will see a red text saying "Autofarm OFF". You can also change the speed, distance, and mode of the autofarm feature in the settings.

      -

      You can bring chest to your location by pressing F2

      -

      The bring chest feature is another useful feature of Black Trap. It allows you to bring any chest in the game to your location instantly. You can use this feature to get more money and items easily. You can activate the bring chest feature by pressing F2 on your keyboard. When you do that, you will see a blue text saying "Bring Chest ON" on the top left corner of your screen. To deactivate it, press F2 again and you will see a red text saying "Bring Chest OFF". You can also change the distance and mode of the bring chest feature in the settings.

      You can teleport to any island by pressing F3

      -

      The teleport feature is another handy feature of Black Trap. It allows you to teleport to any island in the game with one click. You can use this feature to travel faster, explore new areas, and complete quests easier. You can activate the teleport feature by pressing F3 on your keyboard. When you do that, you will see a yellow text saying "Teleport ON" on the top left corner of your screen. To deactivate it, press F3 again and you will see a red text saying "Teleport OFF". You can also change the mode of the teleport feature in the settings.

      -

      You can kill any player by pressing F4

      -

      The kill player feature is the most powerful and dangerous feature of Black Trap. It allows you to kill any player in the game with one hit. You can use this feature to eliminate your enemies, get more bounty, and dominate the game. However, you should use this feature with caution and avoid getting reported or banned by other players or Roblox. You can activate the kill player feature by pressing F4 on your keyboard. When you do that, you will see a pink text saying "Kill Player ON" on the top left corner of your screen. To deactivate it, press F4 again and you will see a red text saying "Kill Player OFF". You can also change the distance and mode of the kill player feature in the settings.

      -

      Tips and Tricks for Using Black Trap Effectively

      -

      You should use Black Trap with caution and avoid getting reported or banned

      -

      Black Trap is a script hack that gives you an unfair advantage over other players in Blox Fruits. However, this also means that you are violating the rules and terms of service of Roblox and Blox Fruits. Therefore, you should use Black Trap with caution and avoid getting reported or banned by other players or Roblox. Some tips to avoid getting caught are:

      -
        -
      • Do not use Black Trap in public servers or crowded areas where other players can see you.
      • -
      • Do not use Black Trap excessively or unnecessarily where it can attract attention or suspicion.
      • -
      • Do not use Black Trap to harass, bully, or ruin the game for other players.
      • -
      • Do not brag or boast about using Black Trap or share it with others who might report you.
      • -
      • Do not use Black Trap on your main account or on an account that has valuable items or progress.
      • -
      -

      You should use Black Trap only when you need it and not abuse it

      -

      Black Trap is a script hack that allows you to do amazing things in Blox Fruits, such as autofarm, bring chest, teleport, and kill players. However, this also means that you are taking away the fun and challenge of the game. Therefore, you should use Black Trap only when you need it and not abuse it. Some tips to use Black Trap moderately are:

      -
        -
      • Use Black Trap only when you are stuck or bored in the game and need a boost.
      • -
      • Use Black Trap only for specific purposes or goals and not for everything.
      • -
      • Use Black Trap only for a short period of time and not for hours or days.
      • -
      • Use Black Trap only for yourself and not for others who might not want it.
      • -
      • Use Black Trap only as a last resort and not as a first option.
      • -
      -

      You should use Black Trap wisely and not ruin the game for others

      -

      Black Trap is a script hack that allows you to do amazing things in Blox Fruits, such as autofarm, bring chest, teleport, and kill players. However, this also means that you are affecting the game for others who are playing fairly and legitimately. Therefore, you should use Black Trap wisely and not ruin the game for others. Some tips to use Black Trap responsibly are:

      -
        -
      • Use Black Trap only in private servers or solo modes where you are not interfering with other players.
      • -
      • Use Black Trap only for fun or entertainment and not for malicious or harmful purposes.
      • -
      • Use Black Trap only for personal gain and not for unfair competition or advantage over other players.
      • -
      • Use Black Trap only with consent and respect and not without permission or consideration.
      • -
      • Use Black Trap only as a tool and not as a crutch or a substitute for skill or effort.
      • -
      -

      Conclusion and FAQs

      -

      In conclusion, Black Trap is a script hack that allows you to do amazing things in Blox Fruits, such as autofarm, bring chest, teleport, and kill players. However, it also comes with risks and drawbacks, such as getting reported or banned by other players or Rob lox and losing the fun and challenge of the game. Therefore, you should use Black Trap with caution, moderation, and responsibility. You should also respect the rules and terms of service of Roblox and Blox Fruits, as well as the rights and feelings of other players. Remember, Black Trap is a script hack that can enhance your gaming experience, but it is not a substitute for your own skill, effort, and enjoyment.

      -

      Here are some frequently asked questions (FAQs) about Black Trap:

      -
        -
1. Is Black Trap safe to use?

        Black Trap is safe to use as long as you use a trusted and updated script executor and avoid getting detected or reported by other players or Roblox. However, there is always a risk of getting banned or losing your account when using any script hack, so use it at your own discretion and responsibility.

        -
2. Is Black Trap free to use?

        Black Trap is free to use for anyone who can access its script and key. However, some script executors may require you to pay a fee or complete a survey to use them. You should always check the legitimacy and reputation of the script executor before using it.

        -
3. Is Black Trap updated regularly?

        Black Trap is updated regularly by its developers to keep up with the changes and updates of Roblox and Blox Fruits. However, sometimes there may be delays or errors in the updates, so you should always check the official website or Pastebin for the latest version of Black Trap.

        -
4. Can I use Black Trap on other Roblox games?

        Black Trap is designed specifically for Blox Fruits and may not work on other Roblox games. However, some features or commands of Black Trap may be compatible with other games, such as teleport or kill player. You can try using Black Trap on other games at your own risk, but do not expect it to work properly or at all.

        -
5. Can I share Black Trap with others?

        You can share Black Trap with others who are interested in using it, but you should do so with caution and discretion. You should not share Black Trap with anyone who might report you or abuse it. You should also respect the intellectual property and credit of the developers of Black Trap and not claim it as your own or modify it without permission.

        -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download CapCut for PC and Enjoy Professional Video Editing Features.md b/spaces/fatiXbelha/sd/Download CapCut for PC and Enjoy Professional Video Editing Features.md deleted file mode 100644 index 182f403b6af33d420401282870b3daa4f451be7a..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download CapCut for PC and Enjoy Professional Video Editing Features.md +++ /dev/null @@ -1,134 +0,0 @@ - -

      How to Download and Use CapCut on PC

      -

      CapCut is a popular video editing app that allows you to create stunning videos for social media platforms like TikTok, Instagram, YouTube, and more. It is developed by ByteDance, the same company behind TikTok, and offers a variety of features and effects to enhance your videos. However, CapCut is mainly designed for mobile devices, so you may wonder how to use it on your PC. In this article, we will show you how to download and use CapCut on PC with an emulator or a web version, as well as the pros and cons of using CapCut on PC.

      -

      What is CapCut and Why Use It on PC?

      -

      CapCut is a free and powerful video editor for mobile devices

      -

      CapCut is a free video editing app that lets you create professional-looking videos with ease. You can use it to trim, crop, rotate, merge, split, reverse, speed up, slow down, add transitions, filters, stickers, texts, music, sound effects, voice-overs, captions, and more. You can also use advanced features like keyframe animation, smooth slow-motion, chroma key, picture-in-picture, stabilization, background removal, speech-to-text, text-to-speech, etc. CapCut supports various video formats and resolutions, and allows you to export your videos without any watermark or quality loss.

      -

      capcut download pc


Download: https://urllie.com/2uNIv4



      -

      CapCut offers many features and effects to enhance your videos

      -

      CapCut has a user-friendly interface and a magnetic timeline that makes video editing easy and fun. You can access a rich library of stock sounds and animations that suit different themes and moods. You can also customize your videos with hundreds of fonts, colors, filters, stickers, texts, effects, etc. You can adjust the brightness, contrast, saturation, temperature, hue, vignette, blur, grain, etc. of your videos. You can also apply transitions like fade in/out, slide in/out, zoom in/out, wipe in/out, etc. to make your videos more dynamic.

      -

      CapCut can be used on PC with an emulator or a web version

      -

      Although CapCut is mainly designed for mobile devices, you can still use it on your PC with an emulator or a web version. An emulator is a software that simulates the environment of a mobile device on your PC. This way, you can run mobile apps like Cap Cut on your PC. A web version is a website that allows you to use CapCut online without downloading or installing anything. Both methods have their advantages and disadvantages, which we will discuss later.

      -

      How to Download and Install CapCut on PC with an Emulator

      -

      Choose an emulator that supports CapCut, such as BlueStacks or NoxPlayer

      -

      An emulator is a software that simulates the environment of a mobile device on your PC. This way, you can run mobile apps like CapCut on your PC. There are many emulators available for Windows and Mac, but not all of them support CapCut. Some of the emulators that support CapCut are BlueStacks, NoxPlayer, LDPlayer, MEmu, etc. You can choose any of them according to your preference and system requirements.

      -

      Download and install the emulator on your PC

      -

      Once you have chosen an emulator that supports CapCut, you need to download and install it on your PC. You can visit the official website of the emulator and follow the instructions to download and install it. The installation process may vary depending on the emulator, but it is usually simple and straightforward. You may need to grant some permissions and accept some terms and conditions during the installation.

      -

      capcut video editor for pc free download
      -how to install capcut on pc windows 10
      -capcut desktop version download
      -capcut app for pc online
      -capcut for pc microsoft store
      -capcut for pc without bluestacks
      -capcut for pc windows 7 32 bit
      -capcut for pc windows 8.1
      -capcut for pc macbook
      -capcut for pc linux
      -capcut for pc apk
      -capcut for pc crack
      -capcut for pc full version
      -capcut for pc offline installer
      -capcut for pc review
      -capcut for pc tutorial
      -capcut for pc system requirements
      -capcut for pc features
      -capcut for pc alternatives
      -capcut for pc pros and cons
      -capcut video editing software for pc
      -how to use capcut on pc
      -how to download capcut on laptop
      -how to update capcut on pc
      -how to uninstall capcut on pc
      -how to transfer capcut videos from phone to pc
      -how to add music to capcut on pc
      -how to add text to capcut on pc
      -how to add effects to capcut on pc
      -how to crop video in capcut on pc
      -how to cut video in capcut on pc
      -how to merge videos in capcut on pc
      -how to speed up video in capcut on pc
      -how to slow down video in capcut on pc
      -how to reverse video in capcut on pc
      -how to zoom in video in capcut on pc
      -how to rotate video in capcut on pc
      -how to change background in capcut on pc
      -how to remove watermark in capcut on pc
      -how to make slideshow in capcut on pc
      -how to make tiktok video in capcut on pc
      -how to make youtube video in capcut on pc
      -how to make instagram video in capcut on pc
      -how to make facebook video in capcut on pc
      -how to make ads in capcut on pc
      -best settings for capcut on pc
      -best fonts for capcut on pc
      -best music for capcut on pc
      -best effects for capcut on pc

      -

      Launch the emulator and sign in with your Google account

      -

      After installing the emulator, you need to launch it and sign in with your Google account. This is necessary to access the Google Play Store and download CapCut. If you don't have a Google account, you can create one for free. Signing in with your Google account will also sync your data and settings across your devices.

      -

      Search for CapCut in the emulator's app store and install it

      -

      Once you have signed in with your Google account, you can search for CapCut in the emulator's app store. You can either use the search bar or browse the categories to find CapCut. Then, you can click on the install button and wait for the app to download and install on your PC.

      -

      Open CapCut and start editing your videos

      -

      After installing CapCut, you can open it and start editing your videos. You can use the same features and functions as you would on your mobile device. You can import your videos from your PC or record them with your webcam. You can also access the stock sounds and animations from the app store. You can edit your videos with the magnetic timeline and the various tools and effects. You can preview your videos before exporting them.

      -

      How to Use CapCut Online on PC without an Emulator

      -

      Visit the official website of CapCut at [1](https://www.capcut.com/)

      -

      If you don't want to use an emulator to run CapCut on your PC, you can use the web version of CapCut instead. The web version is a website that allows you to use CapCut online without downloading or installing anything. You can visit the official website of CapCut at [1](https://www.capcut.com/) to access the web version.

      -

      Click on "Open in browser" and sign in with your account

      -

      On the website, you will see a button that says "Open in browser". You can click on it to launch the web version of CapCut. You will need to sign in with your account to use CapCut online. You can use your TikTok, Facebook, Google, or Apple account to sign in. Signing in with your account will also sync your data and settings across your devices.

      -

      Upload your videos or choose from the templates

      -

      After signing in, you can upload your videos from your PC or choose from the templates provided by CapCut. The templates are categorized by themes and moods, such as love, travel, birthday, etc. You can also search for templates by keywords or hashtags. The templates are pre-edited with music, effects, transitions, etc., so you only need to replace the clips with yours.

      -

      Edit your videos with the online tools and features

      -

      After uploading your videos or choosing a template, you can edit your videos with the online tools and features provided by CapCut. You can use the same features and functions as you would on your mobile device. You can trim, crop, rotate, merge, split, reverse, speed up, slow down, add transitions, filters, stickers, texts, music, sound effects, voice-overs, captions, and more. You can also use advanced features like keyframe animation, smooth slow-motion, chroma key, picture-in-picture, stabilization, background removal, speech-to-text, text-to-speech, etc. You can adjust the brightness, contrast, saturation, temperature, hue, vignette, blur, grain, etc. of your videos. You can also apply transitions like fade in/out, slide in/out, zoom in/out, wipe in/out, etc. to make your videos more dynamic.

      -

      Download or share your videos directly to TikTok or other platforms

      -

      After editing your videos, you can download or share them directly to TikTok or other platforms. You can choose the format and resolution of your videos before downloading them. You can also add hashtags and captions to your videos before sharing them. You can share your videos to TikTok, Instagram, YouTube, Facebook, WhatsApp, Snapchat, etc. with one click.

      -

      Pros and Cons of Using CapCut on PC

      -

      Pros of using CapCut on PC

      -

      Larger screen and better control

      -

      One of the advantages of using CapCut on PC is that you can enjoy a larger screen and better control. You can see more details and edit more precisely on a bigger monitor. You can also use a mouse and a keyboard to navigate and edit more easily and efficiently.

      -

      More storage space and faster performance

      -

      Another advantage of using CapCut on PC is that you can have more storage space and faster performance. You don't have to worry about running out of space or memory on your mobile device. You can also edit longer and higher-quality videos without any lag or crash.

      -

      Easier to transfer files and share videos

      -

      A third advantage of using CapCut on PC is that you can transfer files and share videos more easily. You can import and export your videos from your PC or external devices without any hassle. You can also share your videos to multiple platforms or devices with a simple drag and drop.

      -

      Cons of using CapCut on PC

      -

      Requires an emulator or an internet connection

      -

      One of the disadvantages of using CapCut on PC is that you need an emulator or an internet connection. If you use an emulator, you need to download and install it on your PC, which may take some time and space. You also need to update it regularly to ensure its compatibility and security. If you use the web version, you need a stable and fast internet connection to access and use CapCut online.

      -

      Limited to one video and audio track

      -

      Another disadvantage of using CapCut on PC is that you are limited to one video and audio track. Unlike some other video editors that allow you to add multiple video and audio tracks, CapCut only supports one video and one audio track at a time. This means that you cannot overlay multiple clips or sounds on your videos.

      -

      May encounter some bugs or compatibility issues

      -

      A third disadvantage of using CapCut on PC is that you may encounter some bugs or compatibility issues. Since CapCut is mainly designed for mobile devices, it may not work well on some PCs or browsers. You may experience some glitches, errors, crashes, or freezes while using CapCut on PC.

      -

      Conclusion

      -

      CapCut is a free and powerful video editing app that allows you to create stunning videos for social media platforms like TikTok, Instagram, YouTube, and more. It offers a variety of features and effects to enhance your videos. However, CapCut is mainly designed for mobile devices, so you may wonder how to use it on your PC. In this article, we showed you how to download and use CapCut on PC with an emulator or a web version, as well as the pros and cons of using CapCut on PC. We hope this article was helpful for you. If you have any questions or feedback, please feel free to leave a comment below.

      -

      FAQs

      -

      Is CapCut safe to use?

      -

      Yes, CapCut is safe to use as long as you download it from the official sources or websites. CapCut is developed by ByteDance, the same company behind TikTok, and has millions of users worldwide. CapCut does not contain any malware, spyware, or viruses that may harm your device or data. However, you should be careful about the permissions and privacy settings that you grant to CapCut, and avoid sharing any sensitive or personal information on the app.

      -

      Is CapCut free to use?

      -

      Yes, CapCut is free to use and does not require any subscription or registration. You can download and use CapCut on your mobile device or PC without any cost. You can also access all the features and effects of CapCut without any limitation or watermark. However, CapCut may display some ads or offer some in-app purchases that you can choose to ignore or buy.

      -

      How to update CapCut on PC?

      -

      If you use CapCut on PC with an emulator, you can update it by following these steps:

      -
        -
      • Launch the emulator and open the app store.
      • -
      • Search for CapCut and check if there is any update available.
      • -
      • If there is an update, click on the update button and wait for the app to download and install the latest version.
      • -
      • If there is no update, you can uninstall and reinstall CapCut to get the latest version.
      • -
      -

      If you use CapCut online on PC without an emulator, you don't need to update it manually. The web version of CapCut will automatically update itself whenever there is a new version available.

      -

      How to delete videos from CapCut on PC?

      -

      If you want to delete videos from CapCut on PC, you can follow these steps:

      -
        -
      • Open CapCut and go to the home screen.
      • -
      • Select the video that you want to delete and click on the trash icon at the bottom right corner.
      • -
      • Confirm your action by clicking on "Delete" in the pop-up window.
      • -
      • The video will be deleted from CapCut and your device.
      • -
      -

      How to add subtitles to videos in CapCut on PC?

      -

      If you want to add subtitles to videos in CapCut on PC, you can follow these steps:

      -
        -
      • Open CapCut and import or record a video.
      • -
      • Click on the "Text" icon at the bottom of the screen.
      • -
      • Choose a style and a font for your subtitles.
      • -
      • Type in your subtitles in the text box and adjust the size, color, position, alignment, etc.
      • -
      • Drag and drop the subtitles on the timeline and adjust the duration and timing.
      • -
      • You can also use the speech-to-text feature to automatically generate subtitles from your voice-over or audio track.
      • -
      • Preview your video and export it with subtitles.
      • -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Incredibox Apkpure and Mix and Match Different Sounds and Voices.md b/spaces/fatiXbelha/sd/Download Incredibox Apkpure and Mix and Match Different Sounds and Voices.md deleted file mode 100644 index 5067cb9d1abc6d514ab12e84b6ed699d4b1b1c40..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Incredibox Apkpure and Mix and Match Different Sounds and Voices.md +++ /dev/null @@ -1,146 +0,0 @@ - -

      Download Incredibox APKPure: A Fun and Easy Music App

      -

      Do you love music and want to create your own beats and melodies? Do you want to have fun and express yourself with a simple and intuitive app? If you answered yes, then you should download Incredibox APKPure, a music app that lets you create your own music with the help of a merry crew of beatboxers.

      -

      What is Incredibox?

      -

      Incredibox is a music app that was created by the French company So Far So Good in 2009. It is a game that allows you to mix and match different sounds and effects to create your own music. You can choose from nine different musical styles, each with its own atmosphere and characters. You can also record and share your creations with other users around the world.

      -

      download incredibox apkpure


      DOWNLOAD > https://urllie.com/2uNwWT



      -

      How to play Incredibox?

      -

      Playing Incredibox is very easy and fun. All you need to do is drag and drop icons onto the characters to make them sing, beatbox, or play an instrument. You can combine up to seven sounds at a time, and adjust the volume and tempo as you like. You can also unlock bonus animations and sounds by creating certain combinations.

      -

      What are the features of Incredibox?

      -

      Incredibox has many features that make it a unique and enjoyable music app. Some of them are:

      -
        -
• You can choose from nine musical styles: Alpha, Little Miss, Sunrise, The Love, Brazil, Alive, Jeevan, Dystopia, and The Future.
• You can create your own music by mixing and matching different sounds and effects.
• You can record and share your music with other users on the app or on social media.
• You can listen to other users' creations and vote for your favorites.
• You can discover new sounds and animations by unlocking bonuses.
• You can enjoy the colorful and quirky graphics and characters.
      -

      Why download Incredibox APKPure?

      -

      If you are looking for a fun and easy way to create your own music, then you should download Incredibox APKPure. APKPure is a website that offers free and safe downloads of Android apps and games. By downloading Incredibox APKPure, you can enjoy the following benefits:

      -

      How to download Incredibox APKPure?

      -

      Downloading Incredibox APKPure is very simple and fast. Just follow these steps:

      -
        -
1. Go to Incredibox APK (Android Game) - Free Download - APKCombo.
2. Click on the green "Download" button.
3. Choose the version of the app that suits your device.
4. Wait for the download to finish.
5. Install the app on your device.
6. Enjoy creating your own music with Incredibox!
      -

      What are the benefits of downloading Incredibox APKPure?

      -

      By downloading Incredibox APKPure, you can enjoy the following benefits:

      -
        -
• You can access the latest version of the app without any delays or errors.
• You can save storage space on your device by downloading a smaller file size.
• You can avoid any malware or viruses that might harm your device.
• You can play the app offline without any internet connection.
• You can have fun and unleash your creativity with a simple and intuitive music app.
      -

      How to use Incredibox APKPure?

      -

      Using Incredibox APKPure is very easy and fun. You can create your own music in minutes by following these steps:

      -

      How to create your own music with Incredibox APKPure?

      -

      Creating your own music with Incredibox APKPure is very simple and fun. You can choose from nine different musical styles, each with its own atmosphere and characters. You can also mix and match different sounds and effects to create your own unique music. Here are some tips to help you create your own music with Incredibox APKPure:

      -
        -
• Start by choosing a musical style that suits your mood and preference. You can tap on the icons at the bottom of the screen to switch between different styles.
• Drag and drop icons onto the characters to make them sing, beatbox, or play an instrument. You can combine up to seven sounds at a time, and adjust the volume and tempo as you like.
• Experiment with different combinations and see what sounds good to you. You can also unlock bonus animations and sounds by creating certain combinations.
• If you want to remove a sound, just drag it back to the bottom of the screen.
• If you want to start over, just tap on the "reset" button at the top right corner of the screen.
      -

      How to record and share your music with Incredibox APKPure?

      -

      Recording and sharing your music with Incredibox APKPure is very easy and fun. You can record your music and share it with other users on the app or on social media. Here are some steps to help you record and share your music with Incredibox APKPure:

      -

        -
      1. When you are happy with your music, tap on the "record" button at the top left corner of the screen.
      2. -
      3. Give a name to your music and tap on "save".
      4. -
      5. Your music will be uploaded to the app's server and you will get a link to share it.
      6. -
      7. You can copy the link and paste it on any social media platform or messaging app.
      8. -
      9. You can also listen to other users' creations and vote for your favorites on the app.
      10. -
      -

      How to explore other music styles with Incredibox APKPure?

      -

      Exploring other music styles with Incredibox APKPure is very easy and fun. You can discover new sounds and animations by unlocking bonuses. You can also listen to other users' creations and vote for your favorites. Here are some ways to explore other music styles with Incredibox APKPure:

      -
        -
      • To unlock bonuses, you need to create certain combinations of sounds and effects. You can find hints for the bonuses on the app's website or on YouTube.
      • -
      • When you unlock a bonus, you will see a special animation and hear a new sound. You can also access the bonus sounds from the bottom of the screen.
      • -
      • To listen to other users' creations, you can tap on the "mix" button at the top right corner of the screen. You will see a list of popular mixes that you can play and vote for.
      • -
      • You can search for mixes by name, style, or date, and filter them by language, country, or genre.
      • -
      -

      Conclusion

      -

      Incredibox APKPure is a fun and easy music app that lets you create your own music with the help of a merry crew of beatboxers. You can choose from nine different musical styles, each with its own atmosphere and characters, and mix and match different sounds and effects to create something uniquely yours. When you are happy with a mix, you can record it and share it with other users on the app or on social media, or explore further by unlocking bonuses and listening to other users' creations.

      -

      If you love music and want to have fun and express yourself with a simple and intuitive app, then you should download Incredibox APKPure today. It is a free and safe download that offers many benefits for your device and your creativity. Download Incredibox APKPure now and enjoy creating your own music!

      -

      FAQs

      -

      Here are some frequently asked questions about Incredibox APKPure:

      -
        -
      1. What is Incredibox APKPure?
      2. -

        Incredibox APKPure is a free and safe download of Incredibox, a music app that lets you create your own music with the help of a merry crew of beatboxers.

        -
      3. How do I download Incredibox APKPure?
      4. -

        You can download Incredibox APKPure from Incredibox APK (Android Game) - Free Download - APKCombo. Just click on the green "Download" button, choose the version of the app that suits your device, wait for the download to finish, and install the app on your device.

        -
      5. What are the benefits of downloading Incredibox APKPure?
      6. -

        By downloading Incredibox APKPure, you can enjoy the following benefits:

        -
          -
        • You can access the latest version of the app without any delays or errors.
        • -
        • You can save storage space on your device by downloading a smaller file size.
        • -
        • You can avoid any malware or viruses that might harm your device.
        • -
        • You can play the app offline without any internet connection.
        • -
        • You can have fun and unleash your creativity with a simple and intuitive music app.
        • -
        -
      7. How do I create my own music with Incredibox APKPure?
      8. -

        Creating your own music with Incredibox APKPure is very simple and fun. You can choose from nine different musical styles, each with its own atmosphere and characters. You can also mix and match different sounds and effects to create your own unique music. Here are some tips to help you create your own music with Incredibox APKPure:

        -
          -
        • Start by choosing a musical style that suits your mood and preference. You can tap on the icons at the bottom of the screen to switch between different styles.
        • -
        • Drag and drop icons onto the characters to make them sing, beatbox, or play an instrument. You can combine up to seven sounds at a time, and adjust the volume and tempo as you like.
        • -
        • Experiment with different combinations and see what sounds good to you. You can also unlock bonus animations and sounds by creating certain combinations.
        • -
        • If you want to remove a sound, just drag it back to the bottom of the screen.
        • -
        • If you want to start over, just tap on the "reset" button at the top right corner of the screen.
        • -
        -
      9. How do I record and share my music with Incredibox APKPure?
      10. -

        Recording and sharing your music with Incredibox APKPure is very easy and fun. You can record your music and share it with other users on the app or on social media. Here are some steps to help you record and share your music with Incredibox APKPure:

        -
          -
        1. When you are happy with your music, tap on the "record" button at the top left corner of the screen.
        2. -
        3. Give a name to your music and tap on "save".
        4. -
        5. Your music will be uploaded to the app's server and you will get a link to share it.
        6. -
        7. You can copy the link and paste it on any social media platform or messaging app.
        8. -
        9. You can also listen to other users' creations and vote for your favorites on the app.
        10. -

        -
        -
        \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Evertale Premium APK Enjoy the Full Version of the Game for Free.md b/spaces/fatiXbelha/sd/Evertale Premium APK Enjoy the Full Version of the Game for Free.md deleted file mode 100644 index 0e8586b33301fbdc0747908ceb8a5732f853398f..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Evertale Premium APK Enjoy the Full Version of the Game for Free.md +++ /dev/null @@ -1,122 +0,0 @@ - -

        Evertale Premium APK: A Guide for RPG Fans

        -

        If you are a fan of role-playing games, especially those that involve catching and training monsters, then you might have heard of Evertale. It is a popular game that has been compared to Pokémon due to its fighting style and monster collection system. However, Evertale is more than just a Pokémon clone. It has a rich and immersive story, a vast and beautiful open world, and a challenging and strategic combat system. It also has a multiplayer mode where you can join guilds, compete in PvP leagues, and cooperate with other players.

        -

        evertale premium apk


        Download: https://urllie.com/2uNDBV



        -

        In this article, we will give you an overview of what Evertale is, what Evertale Premium APK is, and how to download and install it. We will also share some tips and tricks on how to play Evertale better and enjoy its features more. Whether you are a beginner or an expert in Evertale, this article will help you get the most out of this amazing game.

        -

        What is Evertale?

        -

        Evertale is a game developed by ZigZaGame Inc. that was released in March 2019 for Android and iOS devices. It has been downloaded over 5 million times from the Google Play Store alone, and has received positive reviews from players and critics alike. It is considered one of the best Pokémon-like games to play on mobile, but it also has its own unique features that make it stand out from other games in the genre.

        -

        A captivating story-driven adventure

        -

        One of the main attractions of Evertale is its story mode, where you will follow the journey of two young heroes and their allies as they try to save the world of Erden from an ancient curse called the Pandemonium. The Pandemonium is a shroud of evil that descends once every 100 years, bringing chaos and destruction to the land. Only the fabled Crestbearers can stop it, but all have failed so far.

        -

        You will explore six diverse regions of Erden, each with their own unique monsters to collect, quests to complete, secrets to discover, and bosses to defeat. You will also encounter friends and foes along the way, who will help or hinder your progress. The story mode is engaging and immersive, with well-written dialogues, stunning graphics, and epic music.

        -

        A monster-catching and training system

        -

        Another feature that makes Evertale appealing is its monster-catching and training system. There are over 180 monsters and heroes that you can catch, train, and evolve in this game. You can find them in various locations on the map, such as bushes, caves, dungeons, or hidden areas. You can also summon them using soul stones or crystals that you can earn or buy in the game.

        -

        To catch a monster, you need to weaken it first by battling it using your own monsters or heroes. Then, you can use a capture crystal to try to capture it. The lower the health of the monster, the higher the chance of capturing it successfully. However, some monsters are more difficult to catch than others, so you may need to use more than one crystal or try again later.

        -

        Once you catch a monster or hero, you can add it to your party or store it in your inventory. You can also customize their names, appearances, and skills. You can train them by battling other monsters or heroes, using items, or completing quests. You can also evolve them into more powerful forms by using certain items or reaching certain levels. Evolving a monster or hero will change its appearance, stats, and skills.

        -

        A strategic turn-based combat

        -

        The combat system of Evertale is turn-based, meaning that you and your opponent will take turns to attack each other using your monsters or heroes. Each monster or hero has a set of skills that they can use in battle, such as physical attacks, magical spells, buffs, debuffs, heals, or special moves. Some skills require mana to use, which is a resource that regenerates over time. Some skills also have cooldowns, which means that you have to wait for a certain number of turns before you can use them again.

        -

        The combat system of Evertale is also strategic, meaning that you have to consider various factors before making your move. For example, you have to pay attention to the turn order, which is determined by the speed stat of each monster or hero. You can also manipulate the turn order by using skills that increase or decrease the speed of yourself or your opponent. You also have to consider the team spirit mechanic, which is a gauge that fills up as you attack or get attacked. When the team spirit gauge is full, you can unleash a powerful team attack that hits all enemies at once.

        -

        Additionally, you have to consider the elemental affinities of each monster or hero. There are six elements in Evertale: fire, water, earth, wind, light, and dark. Each element has its own strengths and weaknesses against other elements. For example, fire is strong against wind but weak against water. You can use this knowledge to your advantage by choosing the right monsters or heroes for each battle.
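
        To make the idea concrete, here is a tiny illustrative Python sketch (not the game's actual code) of how such an advantage chart could be represented. Only the fire/wind/water relationships mentioned above come from the article; the remaining elements are left as placeholders.

        # Hypothetical element chart -- only the relationships stated in the text are filled in.
        ADVANTAGES = {
            "fire": {"strong_against": ["wind"], "weak_against": ["water"]},
            # "water", "earth", "wind", "light" and "dark" would follow the same shape.
        }

        def damage_multiplier(attacker_element, defender_element):
            """Return a rough damage multiplier based on the (partial) chart above."""
            entry = ADVANTAGES.get(attacker_element, {})
            if defender_element in entry.get("strong_against", []):
                return 1.5  # placeholder value; the real factor is not documented here
            if defender_element in entry.get("weak_against", []):
                return 0.5  # placeholder value
            return 1.0

        print(damage_multiplier("fire", "wind"))   # 1.5
        print(damage_multiplier("fire", "water"))  # 0.5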

        -

        A multiplayer mode with guilds and PvP leagues

        -

        Besides the story mode, Evertale also has a multiplayer mode where you can interact with other players from around the world. You can join guilds, which are groups of players who share a common interest or goal. You can chat with your guild members, help them with quests, or participate in guild wars against other guilds.

        -

        You can also compete in PvP leagues, which are ranked battles where you can test your skills and strategies against other players. You can choose from different leagues based on your level and rank. You can earn rewards such as soul stones, crystals, items, and badges by winning battles and climbing up the leaderboards.

        -

        What is Evertale Premium APK?

        -

        Evertale Premium APK is a modified version of the original Evertale game that allows you to enjoy some extra features that are not available in the official version. The main feature of Evertale Premium APK is that it gives you free shopping in the game. This means that you can buy anything you want from the shop without spending any real money.

        -

        For example, you can buy unlimited soul stones and crystals, which are the premium currencies of the game. You can use them to summon rare and powerful monsters and heroes, or to buy other items such as potions, capture crystals, evolution stones, and more. You can also buy unlimited gold coins and silver coins, which are the regular currencies of the game. You can use them to upgrade your weapons and armor, or to buy other items such as food, drinks, books, and more.

        -

        The benefits and risks of using it

        -

        The main benefit of using Evertale Premium APK is that it makes the game easier and more fun to play. You don't have to worry about running out of resources or spending real money on the game. You can enjoy all the features and content of the game without any limitations or restrictions.

        -

        However, there are also some risks involved in using Evertale Premium APK. First of all, it is not an official version of the game, so it may not be compatible with the latest updates and patches of the game. It may also cause some glitches, bugs, or errors in the game that may affect your gameplay experience. Secondly, it is not a legal or authorized version of the game, so it may violate the terms and conditions of the game developer and publisher. It may also expose you to security risks such as viruses, malware, or spyware that may harm your device or personal data. Thirdly, it may affect the balance and fairness of the game, especially in the multiplayer mode. It may give you an unfair advantage over other players who are playing the game legitimately. It may also ruin the challenge and satisfaction of playing the game as intended.

        -

        Therefore, we do not recommend using Evertale Premium APK unless you are fully aware of the consequences and risks involved. We also advise you to respect the rights and efforts of the game developer and publisher, and to support them by playing the game legally and ethically.

        -

        How to download and install it

        -

        If you still want to try Evertale Premium APK, you will need to follow some steps to download and install it on your device. Here are the steps:

        -
          -
        1. First, you will need to uninstall the original Evertale game from your device if you have it installed already. This is to avoid any conflicts or errors between the two versions of the game.
        2. -
        3. Second, you will need to enable the installation of apps from unknown sources on your device. This is to allow your device to install Evertale Premium APK, which is not available on the official app stores. To do this, go to your device settings, then security, then unknown sources, and turn it on.
        4. -
        5. Third, you will need to find a reliable and trustworthy source that provides Evertale Premium APK for download. You can search online for websites or blogs that offer this service, but be careful of fake or malicious links that may harm your device or data. You can also ask for recommendations from other players who have used Evertale Premium APK before.
        6. -
        7. Fourth, you will need to download Evertale Premium APK from the source that you have chosen. Make sure that you have enough storage space on your device for the file, which is about 70 MB in size. You can use a browser or a downloader app to download the file.
        8. -
        9. Fifth, you will need to locate and open the downloaded file on your device. You can use a file manager app or a browser to do this. You will see a prompt asking you to install Evertale Premium APK on your device. Tap on install and wait for the process to finish.
        10. -
        11. Sixth, you will need to launch Evertale Premium APK on your device. You will see a new icon on your home screen or app drawer that represents Evertale Premium APK. Tap on it and enjoy playing Evertale with free shopping.
        12. -
        -

        Tips and Tricks for Playing Evertale

        -

        Now that you have installed Evertale Premium APK on your device, you may want to know some tips and tricks on how to play Evertale better and enjoy its features more. Here are some of them:

        -

        How to catch and evolve rare monsters

        -

        One of the most exciting aspects of Evertale is catching and evolving rare monsters that can help you in your battles and quests. However, catching and evolving rare monsters is not easy, as they are often hidden or guarded by powerful enemies. Here are some tips on how to catch and evolve rare monsters:

        -
          -
        • To catch rare monsters, you need to explore different locations on the map and look for clues that indicate their presence. For example, you may spot a glowing patch, a shaking bush, or a flying shadow, or hear a strange sound. You can also use items such as bait or lures to attract rare monsters to your location.
        • -
        • To catch rare monsters, you also need to prepare enough capture crystals that match their element. For example, if you want to catch a fire monster, you need to use a fire capture crystal. You can buy capture crystals from the shop using gold coins or silver coins.
        • -
        • To catch rare monsters, you also need to weaken them first by battling them using your own monsters or heroes. You can use skills that deal damage or inflict status effects such as poison, burn, freeze, or stun. You can also use skills that lower their stats such as defense or speed.
        • -
        • To catch rare monsters, you also need to time your capture attempt well. You can see a capture gauge above their health bar that indicates their capture rate. The higher the capture rate, the higher the chance of capturing them successfully. The capture rate increases as their health decreases, but it also decreases as their turn comes closer.
        • -
        • To evolve rare monsters, you need to use certain items or reach certain levels that trigger their evolution. For example, some monsters evolve when they reach level 15, 30, or 45. Some monsters evolve when they use a specific item such as a fire stone, a water stone, or a thunder stone. Some monsters evolve when they are exposed to a certain environment such as a volcano, a lake, or a thunderstorm.
        • -
        • To evolve rare monsters, you also need to pay attention to their evolution requirements and conditions. For example, some monsters only evolve during the day or night, or when they are happy or angry. Some monsters only evolve when they are paired with another monster of the same or different species.
        • -
        -

        How to build a balanced and powerful team

        -

        Another important aspect of Evertale is building a balanced and powerful team that can handle any situation and challenge in the game. However, building a balanced and powerful team is not easy, as you have to consider various factors such as the number, type, element, role, and synergy of your monsters and heroes. Here are some tips on how to build a balanced and powerful team:

        -
          -
        • To build a balanced and powerful team, you need to have at least one monster or hero of each element in your team. This will allow you to deal with any elemental advantage or disadvantage that you may encounter in battle. For example, if you face a water enemy, you can use your fire monster or hero to deal more damage and take less damage.
        • -
        • To build a balanced and powerful team, you also need to have at least one monster or hero of each role in your team. There are four roles in Evertale: attacker, defender, supporter, and healer. Each role has its own function and contribution in battle. For example, attackers deal high damage, defenders tank damage and protect allies, supporters buff allies and debuff enemies, and healers restore health and cure status effects.
        • -
        • To build a balanced and powerful team, you also need to have at least one monster or hero that has a synergy with another monster or hero in your team. Synergy is a special effect that occurs when two or more monsters or heroes have a certain connection or compatibility with each other. For example, some monsters or heroes have a synergy based on their species, such as dragons, fairies, or demons. Some monsters or heroes have a synergy based on their story, such as siblings, friends, or rivals.
        • -
        -

        How to master the turn order and team spirit mechanics

        -

        As we mentioned earlier, the combat system of Evertale is turn-based and strategic. Therefore, mastering the turn order and team spirit mechanics is essential for winning battles and progressing in the game. Here are some tips on how to master the turn order and team spirit mechanics:

        -
          -
        • To master the turn order mechanic, pay attention to the speed stat of each monster or hero on both your team and your opponent's team. The speed stat determines who acts first and who acts last in each round of battle, and you can see the turn order at the top of the screen during battle (a short illustration follows this list).
        • -
        • To master the turn order mechanic, you also need to use skills that can manipulate the speed stat of yourself or your opponent. For example, you can use skills that increase your speed or decrease your opponent's speed. You can also use skills that swap your position with another monster or hero in the turn order.
        • -
        • To master the turn order mechanic, you also need to plan ahead and anticipate your opponent's moves. For example, you can use skills that counter or interrupt your opponent's skills. You can also use skills that prepare or charge for a powerful attack on your next turn.
        • -
        • To master the team spirit mechanic, you need to fill up your team spirit gauge by attacking or getting attacked by your opponent. You can see the team spirit gauge at the bottom of the screen during battle.
        • -
        • To master the team spirit mechanic, you also need to use your team spirit gauge wisely by unleashing a team attack at the right moment. A team attack is a powerful strike that hits all enemies at once, and it can be activated by tapping on the team spirit gauge when it is full. You can also choose which monsters or heroes to include by tapping on their icons before activating the team attack.
        • -
        • To master the team spirit mechanic, you also need to consider the element and role of each monster or hero in your team attack. For example, you can use a team attack that consists of fire monsters or heroes against a wind enemy, or a team attack that consists of attackers against a low-defense enemy.
        • -
        -
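
        The short Python sketch below (purely illustrative, not the game's code) shows the turn-order idea from the first tip: combatants act in descending order of their current speed stat, so any skill that raises or lowers speed reshuffles the queue.

        # Hypothetical combatants -- the names and numbers are made up for illustration.
        party = [
            {"name": "Hero A", "speed": 72},
            {"name": "Monster B", "speed": 95},
            {"name": "Monster C", "speed": 61},
        ]

        def turn_order(combatants):
            """Sort combatants so that the fastest acts first, as described above."""
            return sorted(combatants, key=lambda c: c["speed"], reverse=True)

        for c in turn_order(party):
            print(c["name"], c["speed"])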

        How to participate in events and earn rewards

        -

        The last tip we have for playing Evertale is to participate in events and earn rewards. Events are special occasions that happen periodically in the game, and they offer various challenges and opportunities for players. For example, there are events that feature new or exclusive monsters or heroes, events that offer double or triple rewards, events that have special quests or dungeons, and events that have limited-time offers or discounts.

        -

        To participate in events and earn rewards, you need to check the event calendar and the event banner in the game regularly. You can see the event calendar by tapping on the menu button at the top right corner of the screen, and then tapping on the event button. You can see the event banner by swiping left or right on the main screen of the game.

        -

        To participate in events and earn rewards, you also need to complete the event missions and objectives that are given to you. You can see the event missions and objectives by tapping on the event banner, and then tapping on the mission button. You can earn various rewards such as soul stones, crystals, items, gold coins, silver coins, badges, and more by completing the event missions and objectives.

        -

        Conclusion

        -

        Evertale is a game that will appeal to any RPG fan who loves catching and training monsters, exploring a vast and beautiful world, and engaging in strategic and challenging battles. It has a captivating story mode, a monster-catching and training system, a turn-based combat system, and a multiplayer mode with guilds and PvP leagues. It also has stunning graphics, epic music, and smooth gameplay.

        -

        Evertale Premium APK is a modified version of the game that gives you free shopping in the game. It allows you to buy anything you want from the shop without spending any real money. However, it also has some risks and drawbacks that you should be aware of before using it.

        -

        We hope that this article has given you an overview of what Evertale is, what Evertale Premium APK is, and how to download and install it. We also hope that it has given you some tips and tricks on how to play Evertale better and enjoy its features more. If you have any questions or feedback about this article, please feel free to leave a comment below.

        -

        FAQs

        -

        Here are some frequently asked questions about Evertale and Evertale Premium APK:

        -
          -
        1. Q: Is Evertale free to play?
          -A: Yes, Evertale is free to download and play from the Google Play Store or the App Store. However, it also has some optional in-app purchases that can enhance your gameplay experience.
        2. -
        3. Q: Is Evertale Premium APK safe to use?
          -A: Evertale Premium APK is not an official or authorized version of the game, so it may not be safe to use. It may cause some glitches, bugs, or errors in the game that may affect your gameplay experience. It may also violate the terms and conditions of the game developer and publisher. It may also expose you to security risks such as viruses, malware, or spyware that may harm your device or personal data.
        4. -
        5. Q: How can I update Evertale Premium APK?
          -A: Evertale Premium APK may not be compatible with the latest updates and patches of the original Evertale game. Therefore, you may not be able to update it automatically from the app stores. You may need to find a new source that provides an updated version of Evertale Premium APK for download.
        6. -
        7. Q: How can I backup my progress in Evertale?
          -A: You can backup your progress in Evertale by linking your game account to your Facebook account or your Google Play Games account. You can do this by tapping on the menu button at the top right corner of the screen, then tapping on the settings button, then tapping on the account button. You can also restore your progress from your Facebook account or your Google Play Games account if you lose your device or switch devices.
        8. -
        9. Q: How can I contact the game developer or publisher of Evertale?
          -A: You can contact the game developer or publisher of Evertale by sending them an email at support@zigzagame.com. You can also visit their website at https://www.zigzagame.com/ or follow them on their social media accounts such as Facebook, Twitter, Instagram, or YouTube.
        10. -

        -
        -
        \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download ETABS 2016 v 16.2.1 Full Crack for Windows 3264 bit.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download ETABS 2016 v 16.2.1 Full Crack for Windows 3264 bit.md deleted file mode 100644 index 133c0b942f0d0ed59758e771d25592ade803dcbf..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download ETABS 2016 v 16.2.1 Full Crack for Windows 3264 bit.md +++ /dev/null @@ -1,169 +0,0 @@ - -

        How to Download ETABS 2016 Full Crack

        -

        ETABS 2016 is one of the most popular software for structural analysis and design of buildings. It offers a comprehensive set of tools for engineers who work on single-story or high-rise commercial projects. However, ETABS 2016 is not cheap, and many users may want to get it for free by using a cracked version. In this article, we will show you how to download ETABS 2016 full crack, as well as the pros and cons of doing so.

        -

        download etabs 2016 full crack


        Download File: https://gohhs.com/2uPsI7



        -

        What is ETABS 2016 and why do you need it?

        -

        ETABS 2016 is software developed by Computers and Structures, Inc. (CSI), a company that specializes in structural engineering software. ETABS stands for Extended Three Dimensional Analysis of Building Systems, and it is designed to help engineers model, analyze, and design complex structures with ease. Some of the features and benefits of ETABS 2016 are:

        -
          -
        • It supports various types of materials, such as steel, concrete, aluminum, and cold-formed steel.
        • -
        • It has a graphical user interface that allows users to create and edit models visually.
        • -
        • It has a powerful analysis engine that can handle linear and nonlinear behavior, dynamic response, buckling, progressive collapse, and more.
        • -
        • It has a wide range of code-based design features for different regions and standards.
        • -
        • It can generate detailed reports, drawings, and documentation for the design process.
        • -
        -

        If you are an engineer who works on building design projects, you may need ETABS 2016 to perform your tasks efficiently and accurately. However, ETABS 2016 is not a cheap software, and it requires a license to use. The license can be either standalone or network-based, depending on your needs. The price of the license varies depending on the features and modules you want to use. For example, according to the CSI website, the basic standalone license costs $5,000 USD, while the ultimate standalone license costs $12,000 USD.

        -

        Risks and drawbacks of using cracked software

        -

        Given the high cost of ETABS 2016, some users may be tempted to use a cracked version instead. A cracked version is a modified version of the software that bypasses the license verification process. This way, users can use the software without paying for it. However, using cracked software comes with many risks and drawbacks, such as:

        -
          -
        • It is illegal and unethical. Using cracked software violates the intellectual property rights of the software developers. It also deprives them of their revenue and incentive to improve their products. If you are caught using cracked software, you may face legal consequences such as fines or lawsuits.
        • -
        • It is unsafe and unreliable. Cracked software may contain malware or viruses that can harm your computer or steal your data. Cracked software may also have bugs or errors that can cause the software to malfunction or crash. You may lose your work or damage your files if you use cracked software.
        • -
        • It is unsupported and outdated. Cracked software does not receive updates or patches from the developers. This means that you may miss out on new features or improvements that are available in the official version. You may also encounter compatibility issues with other software or hardware if you use cracked software.
        • -
        -

        How to find and download ETABS 2016 full crack

        -

        If you still want to use ETABS 2016 full crack despite the risks and drawbacks, you will need to find and download it from the internet. However, this is not an easy task, as there are many fake or malicious websites that claim to offer cracked software. You will need to be careful and cautious when searching for and downloading ETABS 2016 full crack. Here are some tips and steps to follow:

        -

        Use a reliable torrent site

        -

        One of the most common ways to find and download cracked software is to use a torrent site. A torrent site is a website that hosts torrent files, which are small files that contain information about the files you want to download. You will need a torrent client, such as uTorrent or BitTorrent, to open and download the torrent files. However, not all torrent sites are trustworthy or safe. Some of them may have fake or infected files that can harm your computer or trick you into paying money. Therefore, you should use a reliable torrent site that has a good reputation and a large user base. Some of the best torrent sites for downloading ETABS 2016 full crack are:

        -

        1337X

        -

        1337X is one of the most popular torrent sites in the world, with millions of users and thousands of torrents. It has a simple and user-friendly interface that allows you to search and browse torrents by category, popularity, or date. It also has a dedicated section for software torrents, where you can find ETABS 2016 full crack easily. To download ETABS 2016 full crack from 1337X, you can follow these steps:

        -
          -
        1. Go to the 1337X website and type "ETABS 2016" in the search box.
        2. -
        3. Filter the results by choosing "Applications" under the category menu.
        4. -
        5. Sort the results by choosing "Seeders" under the order menu. This will show you the torrents that have the most seeders, which are users who have the complete file and are sharing it with others.
        6. -
        7. Pick a torrent that has a high number of seeders and leechers, which are users who are downloading the file from others. This will ensure that you can download the file faster and more reliably.
        8. -
        9. Click on the torrent name to open its page and check its details, such as size, description, comments, and ratings. Make sure that the torrent is genuine and has positive feedback from other users.
        10. -
        11. Click on the "Magnet Download" button to open the torrent file in your torrent client.
        12. -
        13. Choose a location to save the file and start the download.
        14. -
        -

        SnapFiles

        -

        SnapFiles is another popular torrent site that specializes in software torrents. It has a large collection of software torrents for various categories, such as antivirus, graphics, office, utilities, and more. It also has a clean and easy-to-use interface that lets you search and download torrents quickly and conveniently. To download ETABS 2016 full crack from SnapFiles, you can follow these steps:

        -
          -
        1. Go to the SnapFiles website and type "ETABS 2016" in the search box.
        2. -
        3. Select "Software" under the type menu and click on the "Search" button.
        4. -
        5. Choose a torrent that has a high number of seeders and leechers, as well as a good rating and comments from other users.
        6. -
        7. Click on the torrent name to open its page and check its details, such as size, description, screenshots, and reviews.
        8. -
        9. Click on the "Download Now" button to open the torrent file in your torrent client.
        10. -
        11. Choose a location to save the file and start the download.
        12. -
        -

        Use a VPN to protect your privacy and security

        -

        Torrenting exposes your IP address to every other peer in the swarm, and your internet service provider can see that you are using BitTorrent. A VPN (virtual private network) encrypts your connection and routes it through another server, which hides your real IP address and makes it much harder for third parties to monitor what you download. If you decide to proceed despite the risks described above, connect to a reputable VPN service before you open the torrent and keep it running until the download is complete.

        -

        Use a malware scanner to check for viruses and spyware

        -

        Another risk of downloading ETABS 2016 full crack is that the file may contain malware or spyware that can infect your computer or steal your information. Malware is any software that is designed to harm or disrupt your system, such as viruses, worms, trojans, ransomware, etc. Spyware is any software that is designed to monitor or collect your personal or sensitive data, such as keystrokes, passwords, browsing history, etc. Therefore, you should use a malware scanner to check for viruses and spyware before opening or installing the file. A malware scanner is a software that can detect and remove malware or spyware from your computer. Some of the best malware scanners for scanning ETABS 2016 full crack are:

        -

        Malwarebytes

        -

        Malwarebytes is one of the most popular and trusted malware scanners in the market. It has a powerful and comprehensive scanning engine that can detect and remove various types of malware and spyware from your computer. It also has a user-friendly and intuitive interface that lets you scan and clean your computer easily and quickly. To scan ETABS 2016 full crack with Malwarebytes, you can follow these steps:

        -
          -
        1. Download and install Malwarebytes from its official website. You can use the free version or the premium version, depending on your preference.
        2. -
        3. Launch Malwarebytes and click on the "Scan" button.
        4. -
        5. Select the "Custom Scan" option and click on the "Configure Scan" button.
        6. -
        7. Select the location where you saved the ETABS 2016 full crack file and click on the "Scan Now" button.
        8. -
        9. Wait for the scan to complete and review the results.
        10. -
        11. If Malwarebytes detects any threats, click on the "Quarantine" button to remove them.
        12. -
        -

        Avast

        -

        Avast is another popular and reliable malware scanner that can protect your computer from various types of malware and spyware. It has a robust and advanced scanning engine that can identify and eliminate various types of threats from your computer. It also has a simple and elegant interface that lets you scan and secure your computer effortlessly and efficiently. To scan ETABS 2016 full crack with Avast, you can follow these steps:

        -
          -
        1. Download and install Avast from its official website. You can use the free version or the premium version, depending on your preference.
        2. -
        3. Launch Avast and click on the "Protection" tab.
        4. -
        5. Select the "Virus Scans" option and click on the "Scan for Viruses" button.
        6. -
        7. Select the "Custom Scan" option and click on the "Create Custom Scan" button.
        8. -
        9. Select the location where you saved the ETABS 2016 full crack file and click on the "Add" button.
        10. -
        11. Click on the "Run Scan" button to start the scan.
        12. -
        13. Wait for the scan to complete and review the results.
        14. -
        15. If Avast detects any threats, click on the "Resolve All" button to remove them.
        16. -
        -

        How to install and activate ETABS 2016 full crack

        -

        After you have downloaded ETABS 2016 full crack from a reliable torrent site, scanned it with a malware scanner, and verified that it is safe and clean, you can proceed to install and activate it on your computer. However, this is not a straightforward process, as you will need to follow some specific steps and instructions to make sure that the software works properly. Here are some general steps to follow:

        -

        Follow the instructions in the readme file

        -

        The first thing you should do before installing ETABS 2016 full crack is to read the readme file that comes with it. The readme file is a text document that contains important information and instructions about how to install and activate the software. You should follow the instructions carefully and precisely to avoid any errors or problems. The readme file may vary depending on the source of the crack, but it usually contains information such as:

        -
          -
        • The system requirements for running ETABS 2016.
        • -
        • The installation steps for ETABS 2016.
        • -
        • The activation steps for ETABS 2016.
        • -
        • The troubleshooting tips for common issues or errors.
        • -
        -

        Copy the crack file to the installation folder

        -

        The next thing you should do after reading the readme file is to copy the crack file to the installation folder of ETABS 2016. The crack file is a modified version of the original executable file of the software that allows you to use it without a license. The crack file may have different names, such as ETABS.exe, ETABS2016.exe, ETABS2016crack.exe, etc. You should replace the original executable file with the crack file in the installation folder. The installation folder is usually located in C:\Program Files\Computers and Structures\ETABS 2016 or C:\Program Files (x86)\Computers and Structures\ETABS 2016, depending on your system. To copy the crack file to the installation folder, you can follow these steps:

        -
          -
        1. Locate the crack file in the folder where you downloaded ETABS 2016 full crack.
        2. -
        3. Right-click on the crack file and select "Copy".
        4. -
        5. Go to the installation folder of ETABS 2016 and find the original executable file.
        6. -
        7. Right-click on the original executable file and select "Rename".
        8. -
        9. Rename the original executable file to something else, such as ETABS.old, ETABS.bak, etc. This will serve as a backup in case something goes wrong.
        10. -
        11. Right-click on an empty space in the installation folder and select "Paste".
        12. -
        13. This will copy the crack file to the installation folder and replace the original executable file.
        14. -
        -

        Run the program as administrator

        -

        The last thing you should do after copying the crack file to the installation folder is to run the program as administrator. This will ensure that the program has full access and permission to run on your computer. To run the program as administrator, you can follow these steps:

        -
          -
        1. Go to the installation folder of ETABS 2016 and find the crack file.
        2. -
        3. Right-click on the crack file and select "Properties".
        4. -
        5. Go to the "Compatibility" tab and check the box that says "Run this program as an administrator".
        6. -
        7. Click on "Apply" and then "OK".
        8. -
        9. Double-click on the crack file to launch ETABS 2016.
        10. -
        -

        Conclusion and FAQs

        -

        In this article, we have shown you how to download ETABS 2016 full crack, as well as the pros and cons of doing so. We have also given you some tips and steps on how to find, download, scan, install, and activate ETABS 2016 full crack on your computer. However, we do not recommend or endorse using cracked software, as it is illegal, unethical, unsafe, unreliable, unsupported, and outdated. If you want to use ETABS 2016 legally and safely, you should buy a license from CSI or use an alternative software that is free or cheaper. Here are some FAQs that may help you further:

        -

        Q: Is there a free trial version of ETABS 2016?

        -

        A: Yes, there is a free trial version of ETABS 2016 that you can download from CSI's website. The trial version is valid for 30 days and has all the features and modules of the ultimate version. However, you will need to register with CSI and provide some information to get a trial license.

        -

        Q: What are some alternative software to ETABS 2016?

        -

        A: There are some alternative software to ETABS 2016 that you can use for structural analysis and design of buildings. Some of them are:

        -
          -
        • SAP2000: Another software developed by CSI that is similar to ETABS but more general-purpose and versatile. It can handle various types of structures, such as bridges, dams, stadiums, etc.
        • -
        • RISA-3D: A software developed by RISA Technologies that is easy to use and fast to learn. It can handle various types of materials, loads, codes, and design features.
        • -
        • STAAD.Pro: A software developed by Bentley Systems that is widely used by engineers around the world. It can handle various types of analysis, design, documentation, and visualization.
        • -
        -

        Q: How can I update or upgrade ETABS 2016?

        -

        A: If you have a licensed version of ETABS 2016, you can update or upgrade it by downloading the latest version from CSI's website. You will need to enter your license information and follow the installation instructions. However, if you have a cracked version of ETABS 2016, you will not be able to update or upgrade it, as it will not be compatible with the official version. You will need to find and download a new crack for the latest version, which may not be available or safe.

        -

        Q: How can I get help or support for ETABS 2016?

        -

        A: If you have a licensed version of ETABS 2016, you can get help or support from CSI by contacting them via phone, email, or online chat. You can also access their online resources, such as manuals, tutorials, videos, forums, etc. However, if you have a cracked version of ETABS 2016, you will not be able to get help or support from CSI, as they will not recognize your license or software. You will need to rely on unofficial sources, such as blogs, YouTube videos, or other users, which may not be accurate or reliable.

        -

        Q: How can I uninstall ETABS 2016?

        -

        A: If you want to uninstall ETABS 2016 from your computer, you can follow these steps:

        -
          -
        1. Go to the Control Panel and select "Programs and Features".
        2. -
        3. Find and select "ETABS 2016" from the list of programs and click on "Uninstall".
        4. -
        5. Follow the uninstallation wizard and confirm your choice.
        6. -
        7. Delete any remaining files or folders related to ETABS 2016 from your computer.
        8. -
        -

        I hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

        -
        -
        \ No newline at end of file diff --git a/spaces/fffffu/bing/src/pages/api/create.ts b/spaces/fffffu/bing/src/pages/api/create.ts deleted file mode 100644 index 508fa97ef609cbb215a61085711638e116235ebe..0000000000000000000000000000000000000000 --- a/spaces/fffffu/bing/src/pages/api/create.ts +++ /dev/null @@ -1,31 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { fetch, debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' - -// const API_ENDPOINT = 'https://www.bing.com/turing/conversation/create' -const API_ENDPOINT = 'https://edgeservices.bing.com/edgesvc/turing/conversation/create'; - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const headers = createHeaders(req.cookies) - - res.writeHead(200, { - 'Content-Type': 'application/json', - }) - - debug('headers', headers) - const response = await fetch(API_ENDPOINT, { method: 'GET', headers }) - .then((res) => res.text()) - - res.end(response) - } catch (e) { - return res.end(JSON.stringify({ - result: { - value: 'UnauthorizedRequest', - message: `${e}` - } - })) - } -} diff --git a/spaces/fffiloni/CLIP-Interrogator-2/README.md b/spaces/fffiloni/CLIP-Interrogator-2/README.md deleted file mode 100644 index 59ee41be44f8d0a6ed20049d73eb12cbebee18b2..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/CLIP-Interrogator-2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: CLIP Interrogator 2 -emoji: 🕵️‍♂️🕵️‍♂️ -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/fffiloni/video_frame_interpolation/README.md b/spaces/fffiloni/video_frame_interpolation/README.md deleted file mode 100644 index d0e33fd260744d96d9e295092bc289db2e482d2b..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/video_frame_interpolation/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Video Frame Interpolation -emoji: 🐠🐠 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -duplicated_from: nanomenta/sketch_frame_interpolation ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/firestalker/anime-tts/monotonic_align/core.py b/spaces/firestalker/anime-tts/monotonic_align/core.py deleted file mode 100644 index 1f940605fe4fd0738fa0006149fcba14ef88223a..0000000000000000000000000000000000000000 --- a/spaces/firestalker/anime-tts/monotonic_align/core.py +++ /dev/null @@ -1,36 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), - nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0. 
- else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 diff --git a/spaces/flatindo/Image-Diffusion-WebUI/diffusion_webui/utils/data_utils.py b/spaces/flatindo/Image-Diffusion-WebUI/diffusion_webui/utils/data_utils.py deleted file mode 100644 index c57719012aa6d1e73e144c84ca0aaddeac33a383..0000000000000000000000000000000000000000 --- a/spaces/flatindo/Image-Diffusion-WebUI/diffusion_webui/utils/data_utils.py +++ /dev/null @@ -1,12 +0,0 @@ -from PIL import Image - - -def image_grid(imgs, rows, cols): - assert len(imgs) == rows * cols - - w, h = imgs[0].size - grid = Image.new("RGB", size=(cols * w, rows * h)) - - for i, img in enumerate(imgs): - grid.paste(img, box=(i % cols * w, i // cols * h)) - return grid diff --git a/spaces/flax-community/multilingual-image-captioning/sections/pretraining/model.md b/spaces/flax-community/multilingual-image-captioning/sections/pretraining/model.md deleted file mode 100644 index 32177ef55e49351cb752b053e046be135d565c61..0000000000000000000000000000000000000000 --- a/spaces/flax-community/multilingual-image-captioning/sections/pretraining/model.md +++ /dev/null @@ -1,10 +0,0 @@ -The model is shown in the image above. We create a custom model in Flax which integerates the CLIP Vision model as an encoder inside mBART model. We also use custom configs and modules in order to accomodate for these changes, and allow loading from mBART and CLIP Vision checkpoints. The image is fed to the CLIP Vision encoder and the shifted token ids are fed to the mBART decoder. We use the `facebook/mbart-large-50` and `openai/clip-vit-base-patch32` checkpoints for mBART and CLIP Vision models, respectively. All our code is available on [GitHub](https://github.com/gchhablani/multilingual-image-captioning). - -Our model reached **eval loss of ~2.6** around ~70K steps. 
Here are the BLEU scores (out of 1) for different languages: - -|Language |BLEU-1|BLEU-2|BLEU-3|BLEU-4| -|--------------------------|------|------|------|------| -|English | 0.13083| 0.08887| 0.06681 | 0.04899| -|Spanish | 0.15981| 0.09858| 0.06918| 0.04776| -|German | 0.14234| 0.09817| 0.07405| 0.0515| -|French | 0.13021| 0.08862| 0.06598| 0.04647| \ No newline at end of file diff --git a/spaces/fsqhn/anime-remove-background2/app.py b/spaces/fsqhn/anime-remove-background2/app.py deleted file mode 100644 index 230a0d5f8a3da6ab18ecb8db1cd90016a489b96a..0000000000000000000000000000000000000000 --- a/spaces/fsqhn/anime-remove-background2/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import gradio as gr -import huggingface_hub -import onnxruntime as rt -import numpy as np -import cv2 - - -def get_mask(img, s=1024): - img = (img / 255).astype(np.float32) - h, w = h0, w0 = img.shape[:-1] - h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) - ph, pw = s - h, s - w - img_input = np.zeros([s, s, 3], dtype=np.float32) - img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h)) - img_input = np.transpose(img_input, (2, 0, 1)) - img_input = img_input[np.newaxis, :] - mask = rmbg_model.run(None, {'img': img_input})[0][0] - mask = np.transpose(mask, (1, 2, 0)) - mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] - mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis] - return mask - - -def rmbg_fn(img): - mask = get_mask(img) - img = (mask * img + 255 * (1 - mask)).astype(np.uint8) - mask = (mask * 255).astype(np.uint8) - img = np.concatenate([img, mask], axis=2, dtype=np.uint8) - mask = mask.repeat(3, axis=2) - return mask, img - - -if __name__ == "__main__": - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx") - rmbg_model = rt.InferenceSession(model_path, providers=providers) - app = gr.Blocks() - with app: - gr.Markdown("# Anime Remove Background\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.animeseg)\n\n" - "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)") - with gr.Row(): - with gr.Column(): - input_img = gr.Image(label="input image") - examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)] - examples = gr.Dataset(components=[input_img], samples=examples_data) - run_btn = gr.Button(variant="primary") - output_mask = gr.Image(label="mask") - output_img = gr.Image(label="result", image_mode="RGBA") - examples.click(lambda x: x[0], [examples], [input_img]) - run_btn.click(rmbg_fn, [input_img], [output_mask, output_img]) - app.launch() diff --git a/spaces/gauthamk/EuroSAT-ResNet34/app.py b/spaces/gauthamk/EuroSAT-ResNet34/app.py deleted file mode 100644 index 9b16339e2736dbe6711af68d27161b692a77a026..0000000000000000000000000000000000000000 --- a/spaces/gauthamk/EuroSAT-ResNet34/app.py +++ /dev/null @@ -1,13 +0,0 @@ -import gradio as gr -import os -from functions import * - -examples_dir = 'examples' -title = "Land Use Classification - ResNet34 PyTorch" -examples = [os.path.join(examples_dir, i) for i in os.listdir('examples')] - -interface = gr.Interface(fn=predict, inputs=gr.Image(type= 'numpy', shape=(64, 64)).style(height= 256), - outputs= gr.Label(num_top_classes= 5), cache_examples= False, - examples= examples, title= title) - -interface.launch() \ No newline at end of file diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py 
b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py deleted file mode 100644 index a3941e27874993418b3b5708d5a7485f175ff9c8..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .registry import CONV_LAYERS - - -def conv_ws_2d(input, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - eps=1e-5): - c_in = weight.size(0) - weight_flat = weight.view(c_in, -1) - mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) - std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) - weight = (weight - mean) / (std + eps) - return F.conv2d(input, weight, bias, stride, padding, dilation, groups) - - -@CONV_LAYERS.register_module('ConvWS') -class ConvWS2d(nn.Conv2d): - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True, - eps=1e-5): - super(ConvWS2d, self).__init__( - in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - bias=bias) - self.eps = eps - - def forward(self, x): - return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding, - self.dilation, self.groups, self.eps) - - -@CONV_LAYERS.register_module(name='ConvAWS') -class ConvAWS2d(nn.Conv2d): - """AWS (Adaptive Weight Standardization) - - This is a variant of Weight Standardization - (https://arxiv.org/pdf/1903.10520.pdf) - It is used in DetectoRS to avoid NaN - (https://arxiv.org/pdf/2006.02334.pdf) - - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the conv kernel - stride (int or tuple, optional): Stride of the convolution. Default: 1 - padding (int or tuple, optional): Zero-padding added to both sides of - the input. Default: 0 - dilation (int or tuple, optional): Spacing between kernel elements. - Default: 1 - groups (int, optional): Number of blocked connections from input - channels to output channels. Default: 1 - bias (bool, optional): If set True, adds a learnable bias to the - output. Default: True - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True): - super().__init__( - in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - bias=bias) - self.register_buffer('weight_gamma', - torch.ones(self.out_channels, 1, 1, 1)) - self.register_buffer('weight_beta', - torch.zeros(self.out_channels, 1, 1, 1)) - - def _get_weight(self, weight): - weight_flat = weight.view(weight.size(0), -1) - mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1) - std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1) - weight = (weight - mean) / std - weight = self.weight_gamma * weight + self.weight_beta - return weight - - def forward(self, x): - weight = self._get_weight(self.weight) - return F.conv2d(x, weight, self.bias, self.stride, self.padding, - self.dilation, self.groups) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - """Override default load function. 
- - AWS overrides the function _load_from_state_dict to recover - weight_gamma and weight_beta if they are missing. If weight_gamma and - weight_beta are found in the checkpoint, this function will return - after super()._load_from_state_dict. Otherwise, it will compute the - mean and std of the pretrained weights and store them in weight_beta - and weight_gamma. - """ - - self.weight_gamma.data.fill_(-1) - local_missing_keys = [] - super()._load_from_state_dict(state_dict, prefix, local_metadata, - strict, local_missing_keys, - unexpected_keys, error_msgs) - if self.weight_gamma.data.mean() > 0: - for k in local_missing_keys: - missing_keys.append(k) - return - weight = self.weight.data - weight_flat = weight.view(weight.size(0), -1) - mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1) - std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1) - self.weight_beta.data.copy_(mean) - self.weight_gamma.data.copy_(std) - missing_gamma_beta = [ - k for k in local_missing_keys - if k.endswith('weight_gamma') or k.endswith('weight_beta') - ] - for k in missing_gamma_beta: - local_missing_keys.remove(k) - for k in local_missing_keys: - missing_keys.append(k) diff --git a/spaces/getrajeev03/text2sql/app.py b/spaces/getrajeev03/text2sql/app.py deleted file mode 100644 index 931a6492e9e1a156c8dc458a024c99379d68e0e0..0000000000000000000000000000000000000000 --- a/spaces/getrajeev03/text2sql/app.py +++ /dev/null @@ -1,17 +0,0 @@ -import gradio as gr -from transformers import AutoTokenizer, AutoModelWithLMHead - -tokenizer = AutoTokenizer.from_pretrained("dbernsohn/t5_wikisql_en2SQL") -model = AutoModelWithLMHead.from_pretrained("dbernsohn/t5_wikisql_en2SQL") - -def greet(query): - input_text = f"translate English to Sql: {query} " - features = tokenizer([input_text], return_tensors='pt') - output = model.generate(input_ids=features['input_ids'], - attention_mask=features['attention_mask']) - return tokenizer.decode(output[0]) - -iface = gr.Interface(fn=greet, inputs="text", outputs="text", examples=[ - "what are the names of all the people in the USA" -]) -iface.launch() \ No newline at end of file diff --git a/spaces/ggffdd/White-box-Cartoonization/app.py b/spaces/ggffdd/White-box-Cartoonization/app.py deleted file mode 100644 index c55ced56bd87a85f59d1c8ef84b7eca87422720f..0000000000000000000000000000000000000000 --- a/spaces/ggffdd/White-box-Cartoonization/app.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations -import argparse -import functools -import os -import pathlib -import sys -from typing import Callable -import uuid - -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image - -from io import BytesIO -from wbc.cartoonize import Cartoonize - -ORIGINAL_REPO_URL = 'https://github.com/SystemErrorWang/White-box-Cartoonization' -TITLE = 'SystemErrorWang/White-box-Cartoonization' -DESCRIPTION = f"""This is a demo for {ORIGINAL_REPO_URL}. 
- -""" -ARTICLE = """ - -""" - -SAFEHASH = [x for x in "0123456789-abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ"] -def compress_UUID(): - ''' - 根据http://www.ietf.org/rfc/rfc1738.txt,由uuid编码扩bai大字符域生成du串 - 包括:[0-9a-zA-Z\-_]共64个 - 长度:(32-2)/3*2=20 - 备注:可在地球上人zhi人都用,使用100年不重复(2^120) - :return:String - ''' - row = str(uuid.uuid4()).replace('-', '') - safe_code = '' - for i in range(10): - enbin = "%012d" % int(bin(int(row[i * 3] + row[i * 3 + 1] + row[i * 3 + 2], 16))[2:], 10) - safe_code += (SAFEHASH[int(enbin[0:6], 2)] + SAFEHASH[int(enbin[6:12], 2)]) - safe_code = safe_code.replace('-', '') - return safe_code - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--theme', type=str) - parser.add_argument('--live', action='store_true') - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - parser.add_argument('--allow-flagging', type=str, default='never') - parser.add_argument('--allow-screenshot', action='store_true') - return parser.parse_args() - -def run( - image, - cartoonize : Cartoonize -) -> tuple[PIL.Image.Image]: - - out_path = compress_UUID()+'.png' - cartoonize.run_sigle(image.name, out_path) - - return PIL.Image.open(out_path) - - -def main(): - gr.close_all() - - args = parse_args() - - cartoonize = Cartoonize(os.path.join(os.path.dirname(os.path.abspath(__file__)),'wbc/saved_models/')) - - func = functools.partial(run, cartoonize=cartoonize) - func = functools.update_wrapper(func, run) - - gr.Interface( - func, - [ - gr.inputs.Image(type='file', label='Input Image'), - ], - [ - gr.outputs.Image( - type='pil', - label='Result'), - ], - # examples=examples, - theme=args.theme, - title=TITLE, - description=DESCRIPTION, - article=ARTICLE, - allow_screenshot=args.allow_screenshot, - allow_flagging=args.allow_flagging, - live=args.live, - ).launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Free Download Block Survival Legend Of The Lost Islands .zip.md b/spaces/gotiQspiryo/whisper-ui/examples/Free Download Block Survival Legend Of The Lost Islands .zip.md deleted file mode 100644 index 613781ad374098de56b4529e651f66eb6932d426..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Free Download Block Survival Legend Of The Lost Islands .zip.md +++ /dev/null @@ -1,21 +0,0 @@ -
-How to Download Block Survival: Legend of the Lost Islands for Free
-
-Block Survival: Legend of the Lost Islands is a sandbox survival game where you explore, craft, and fight your way through a procedurally generated world full of secrets and dangers. You can play solo or with friends in online co-op mode, and customize your character with different skins and weapons.
-
-If you want to download Block Survival: Legend of the Lost Islands for free, you can follow these simple steps:
-
-Free Download Block Survival: Legend of the Lost Islands .zip
-
-Download Zip: https://urlgoal.com/2uyLDd
-
-1. Click on the link below to go to the download page.
-2. Choose your preferred platform (Windows, Mac, or Linux) and click on the download button.
-3. Wait for the download to finish and extract the .zip file to your desired location.
-4. Run the game executable and enjoy!
-
-Note: This is a cracked version of the game, so you may encounter some bugs or errors. Also, downloading pirated games is illegal and may harm your device or expose you to malware. We do not condone or support piracy in any way. Please buy the game from the official store if you like it and want to support the developers.
-
-Download Block Survival: Legend of the Lost Islands for Free
-
-Block Survival: Legend of the Lost Islands is a game that offers a lot of freedom and creativity. You can explore different biomes, such as forests, deserts, mountains, and oceans, and discover hidden caves, temples, and dungeons. You can also collect resources and craft various items, such as tools, weapons, armor, and vehicles. You can even build your own base and decorate it with furniture and paintings.
-
-The game also features a dynamic day and night cycle, weather effects, and realistic physics. You will have to face different challenges and threats, such as hunger, thirst, temperature, and wildlife. You will also encounter hostile creatures and bosses that will test your skills and strategy. You can use different weapons and gadgets to fight them, such as bows, guns, grenades, jetpacks, and grappling hooks.
-
-Block Survival: Legend of the Lost Islands is a game that will keep you entertained for hours with its endless possibilities and fun gameplay. You can play it alone or with your friends in online co-op mode, where you can share resources, items, and vehicles. You can also customize your character with different skins and accessories, and unlock achievements and trophies.
-
-If you want to experience this amazing game for free, download it now from the link below. But remember, this is a cracked version of the game that may not work properly or may contain viruses. We recommend that you buy the game from the official store if you enjoy it and want to support the developers.
-
-Download Block Survival: Legend of the Lost Islands for Free
-
-d5da3c52bf
        \ No newline at end of file diff --git a/spaces/gradio/HuBERT/examples/speech_to_text/README.md b/spaces/gradio/HuBERT/examples/speech_to_text/README.md deleted file mode 100644 index f639d300d342f8de1392c98bfc44ec8690188539..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/speech_to_text/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Speech-to-Text (S2T) Modeling - -[https://www.aclweb.org/anthology/2020.aacl-demo.6](https://www.aclweb.org/anthology/2020.aacl-demo.6.pdf) - -Speech recognition (ASR) and speech-to-text translation (ST) with fairseq. - -## Data Preparation -S2T modeling data consists of source speech features, target text and other optional information -(source text, speaker id, etc.). Fairseq S2T uses per-dataset-split TSV manifest files -to store these information. Each data field is represented by a column in the TSV file. - -Unlike text token embeddings, speech features (e.g. log mel-scale filter banks) are usually fixed -during model training and can be pre-computed. The manifest file contains the path to -either the feature file in NumPy format or the WAV/FLAC audio file. For the latter, -features will be extracted on-the-fly by fairseq S2T. Optionally, feature/audio files can be packed -into uncompressed ZIP files (then accessed via byte offset and length) to improve I/O performance. - -Fairseq S2T also employs a YAML file for data related configurations: tokenizer type and dictionary path -for the target text, feature transforms such as CMVN (cepstral mean and variance normalization) and SpecAugment, -temperature-based resampling, etc. - -## Model Training -Fairseq S2T uses the unified `fairseq-train` interface for model training. It requires arguments `--task speech_to_text`, - `--arch ` and `--config-yaml `. - -## Inference & Evaluation -Fairseq S2T uses the unified `fairseq-generate`/`fairseq-interactive` interface for inference and evaluation. It -requires arguments `--task speech_to_text` and `--config-yaml `. The interactive console takes -audio paths (one per line) as inputs. - - -## Examples -- [Speech Recognition (ASR) on LibriSpeech](docs/librispeech_example.md) - -- [Speech-to-Text Translation (ST) on MuST-C](docs/mustc_example.md) - -- [Speech-to-Text Translation (ST) on CoVoST 2](docs/covost_example.md) - -- [Speech-to-Text Translation (ST) on Multilingual TEDx](docs/mtedx_example.md) -- [Simultaneous Speech-to-Text Translation (SimulST) on MuST-C](docs/simulst_mustc_example.md) - -## Updates -- 02/04/2021: Added interactive decoding (`fairseq-interactive`) support. Examples: - [ASR (LibriSpeech)](docs/librispeech_example.md#interactive-decoding) - and [ST (CoVoST 2)](docs/covost_example.md#interactive-decoding). -- 01/08/2021: Several fixes for S2T Transformer model, inference-time de-tokenization, scorer configuration and data - preparation scripts. We also add pre-trained models to the examples and revise the instructions. - Breaking changes: the data preparation scripts now extract filterbank features without CMVN. CMVN is instead applied - on-the-fly (defined in the config YAML). - -## What's Next -- We are migrating the old fairseq [ASR example](../speech_recognition) into this S2T framework and - merging the features from both sides. -- The following papers also base their experiments on fairseq S2T. We are adding more examples for replication. 
- - [Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation (Wang et al., 2020)](https://arxiv.org/abs/2006.05474) - - [Self-Supervised Representations Improve End-to-End Speech Translation (Wu et al., 2020)](https://arxiv.org/abs/2006.12124) - - [Self-Training for End-to-End Speech Translation (Pino et al., 2020)](https://arxiv.org/abs/2006.02490) - - [CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus (Wang et al., 2020)](https://arxiv.org/abs/2002.01320) - - [Harnessing Indirect Training Data for End-to-End Automatic Speech Translation: Tricks of the Trade (Pino et al., 2019)](https://arxiv.org/abs/1909.06515) - -## Citation -Please cite as: -``` -@inproceedings{wang2020fairseqs2t, - title = {fairseq S2T: Fast Speech-to-Text Modeling with fairseq}, - author = {Changhan Wang and Yun Tang and Xutai Ma and Anne Wu and Dmytro Okhonko and Juan Pino}, - booktitle = {Proceedings of the 2020 Conference of the Asian Chapter of the Association for Computational Linguistics (AACL): System Demonstrations}, - year = {2020}, -} - -@inproceedings{ott2019fairseq, - title = {fairseq: A Fast, Extensible Toolkit for Sequence Modeling}, - author = {Myle Ott and Sergey Edunov and Alexei Baevski and Angela Fan and Sam Gross and Nathan Ng and David Grangier and Michael Auli}, - booktitle = {Proceedings of NAACL-HLT 2019: Demonstrations}, - year = {2019}, -} -``` diff --git a/spaces/gradio/HuBERT/fairseq/modules/dynamicconv_layer/__init__.py b/spaces/gradio/HuBERT/fairseq/modules/dynamicconv_layer/__init__.py deleted file mode 100644 index 22dc6f403d2a0ecdb1b9e7e69ed96bd560e93b2c..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/modules/dynamicconv_layer/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .dynamicconv_layer import DynamicconvLayer # noqa diff --git a/spaces/gradio/timeseries-forecasting-with-prophet/app.py b/spaces/gradio/timeseries-forecasting-with-prophet/app.py deleted file mode 100644 index 373a6de4bf8fa877ea2c6d21184113daf0141ce5..0000000000000000000000000000000000000000 --- a/spaces/gradio/timeseries-forecasting-with-prophet/app.py +++ /dev/null @@ -1,41 +0,0 @@ -import gradio as gr -import pypistats -from datetime import date -from dateutil.relativedelta import relativedelta -import pandas as pd -from prophet import Prophet -pd.options.plotting.backend = "plotly" - -def get_forecast(lib, time): - - data = pypistats.overall(lib, total=True, format="pandas") - data = data.groupby("category").get_group("with_mirrors").sort_values("date") - start_date = date.today() - relativedelta(months=int(time.split(" ")[0])) - df = data[(data['date'] > str(start_date))] - - df1 = df[['date','downloads']] - df1.columns = ['ds','y'] - - m = Prophet() - m.fit(df1) - future = m.make_future_dataframe(periods=90) - forecast = m.predict(future) - fig1 = m.plot(forecast) - return fig1 - -with gr.Blocks() as demo: - gr.Markdown( - """ - **Pypi Download Stats 📈 with Prophet Forecasting**: see live download stats for popular open-source libraries 🤗 along with a 3 month forecast using Prophet. The [ source code for this Gradio demo is here](https://huggingface.co/spaces/gradio/timeseries-forecasting-with-prophet/blob/main/app.py). 
- """) - with gr.Row(): - lib = gr.Dropdown(["pandas", "scikit-learn", "torch", "prophet"], label="Library", value="pandas") - time = gr.Dropdown(["3 months", "6 months", "9 months", "12 months"], label="Downloads over the last...", value="12 months") - - plt = gr.Plot() - - lib.change(get_forecast, [lib, time], plt, queue=False) - time.change(get_forecast, [lib, time], plt, queue=False) - demo.load(get_forecast, [lib, time], plt, queue=False) - -demo.launch() \ No newline at end of file diff --git a/spaces/gsaivinay/Llama-2-13B-GGML-UI/Dockerfile b/spaces/gsaivinay/Llama-2-13B-GGML-UI/Dockerfile deleted file mode 100644 index 093f5352f7e811d0515119a0272de7631bade967..0000000000000000000000000000000000000000 --- a/spaces/gsaivinay/Llama-2-13B-GGML-UI/Dockerfile +++ /dev/null @@ -1,63 +0,0 @@ -FROM node:18-alpine AS base - -# Install dependencies only when needed -FROM base AS deps -# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed. -RUN apk add --no-cache libc6-compat -WORKDIR /app - -# Install dependencies based on the preferred package manager -COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* ./ -RUN \ - if [ -f yarn.lock ]; then yarn --frozen-lockfile; \ - elif [ -f package-lock.json ]; then npm ci; \ - elif [ -f pnpm-lock.yaml ]; then yarn global add pnpm && pnpm i --frozen-lockfile; \ - else echo "Lockfile not found." && exit 1; \ - fi - -# Uncomment the following lines if you want to use a secret at buildtime, -# for example to access your private npm packages -# RUN --mount=type=secret,id=HF_EXAMPLE_SECRET,mode=0444,required=true \ -# $(cat /run/secrets/HF_EXAMPLE_SECRET) - -# Rebuild the source code only when needed -FROM base AS builder -WORKDIR /app -COPY --from=deps /app/node_modules ./node_modules -COPY . . - -# Next.js collects completely anonymous telemetry data about general usage. -# Learn more here: https://nextjs.org/telemetry -# Uncomment the following line in case you want to disable telemetry during the build. -# ENV NEXT_TELEMETRY_DISABLED 1 - -# RUN yarn build - -# If you use yarn, comment out this line and use the line above -RUN npm run build - -# Production image, copy all the files and run next -FROM base AS runner -WORKDIR /app - -ENV NODE_ENV production -# Uncomment the following line in case you want to disable telemetry during runtime. 
-# ENV NEXT_TELEMETRY_DISABLED 1 - -RUN addgroup --system --gid 1001 nodejs -RUN adduser --system --uid 1001 nextjs - -COPY --from=builder /app/public ./public - -# Automatically leverage output traces to reduce image size -# https://nextjs.org/docs/advanced-features/output-file-tracing -COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./ -COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static - -USER nextjs - -EXPOSE 3000 - -ENV PORT 3000 - -CMD ["node", "server.js"] \ No newline at end of file diff --git a/spaces/gvozdev/subspace/README.md b/spaces/gvozdev/subspace/README.md deleted file mode 100644 index ceca0a0e1cfe8c2bc0a35c10b052b81599690166..0000000000000000000000000000000000000000 --- a/spaces/gvozdev/subspace/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Subspace -emoji: 🚀 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.35.2 -app_file: main.py -pinned: false ---- \ No newline at end of file diff --git a/spaces/gwang-kim/DATID-3D/pose_estimation/models/arcface_torch/configs/ms1mv3_r2060.py b/spaces/gwang-kim/DATID-3D/pose_estimation/models/arcface_torch/configs/ms1mv3_r2060.py deleted file mode 100644 index 23ad81e082c4b6390b67b164d0ceb84bb0635684..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/pose_estimation/models/arcface_torch/configs/ms1mv3_r2060.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r2060" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 64 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/ms1m-retinaface-t1" -config.num_classes = 93431 -config.num_image = 5179510 -config.num_epoch = 25 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/h2oai/wave-tour/examples/tour-assets/monaco/editor/editor.main.js b/spaces/h2oai/wave-tour/examples/tour-assets/monaco/editor/editor.main.js deleted file mode 100644 index 23f518dd593608e299f73c2860088ee4c86c40d7..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/tour-assets/monaco/editor/editor.main.js +++ /dev/null @@ -1,805 +0,0 @@ -/*!----------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. 
- * Version: 0.33.0(c722ca6c7eed3d7987c0d5c3df5c45f6b15e77d1) - * Released under the MIT license - * https://github.com/microsoft/vscode/blob/main/LICENSE.txt - *-----------------------------------------------------------*/(function(){var te=["exports","require","vs/base/common/lifecycle","vs/editor/common/core/range","vs/base/common/event","vs/nls","vs/nls!vs/editor/editor.main","vs/base/browser/dom","vs/base/common/strings","vs/platform/instantiation/common/instantiation","vs/base/common/async","vs/editor/common/core/position","vs/css!vs/editor/editor.main","vs/base/common/errors","vs/platform/theme/common/themeService","vs/editor/browser/editorExtensions","vs/base/common/platform","vs/platform/contextkey/common/contextkey","vs/base/common/arrays","vs/base/common/types","vs/editor/common/services/languageFeatures","vs/base/common/cancellation","vs/editor/common/core/selection","vs/platform/theme/common/colorRegistry","vs/editor/common/editorContextKeys","vs/base/common/uri","vs/platform/commands/common/commands","vs/editor/common/languages","vs/base/common/codicons","vs/editor/browser/services/codeEditorService","vs/base/common/color","vs/base/browser/fastDomNode","vs/editor/common/config/editorOptions","vs/editor/common/languages/languageConfigurationRegistry","vs/base/browser/browser","vs/editor/common/languages/language","vs/platform/registry/common/platform","vs/platform/actions/common/actions","vs/editor/common/model/textModel","vs/platform/notification/common/notification","vs/base/common/objects","vs/platform/configuration/common/configuration","vs/base/common/resources","vs/platform/keybinding/common/keybinding","vs/base/browser/keyboardEvent","vs/base/common/network","vs/base/browser/ui/aria/aria","vs/base/common/actions","vs/editor/common/services/model","vs/base/common/map","vs/editor/common/model","vs/editor/browser/view/viewPart","vs/platform/instantiation/common/extensions","vs/platform/opener/common/opener","vs/editor/common/core/editorColorRegistry","vs/base/common/stopwatch","vs/editor/common/services/resolverService","vs/base/common/iterator","vs/base/common/keyCodes","vs/base/browser/mouseEvent","vs/base/browser/touch","vs/editor/common/cursorCommon","vs/base/browser/ui/widget","vs/editor/common/core/editOperation","vs/editor/browser/config/domFontInfo","vs/platform/accessibility/common/accessibility","vs/platform/log/common/log","vs/editor/common/services/languageFeatureDebounce","vs/base/common/htmlContent","vs/base/browser/ui/scrollbar/scrollableElement","vs/editor/common/core/cursorColumns","vs/editor/common/viewModel","vs/editor/common/standaloneStrings","vs/platform/progress/common/progress","vs/platform/theme/common/iconRegistry","vs/base/common/filters","vs/base/common/severity","vs/editor/common/tokens/lineTokens","vs/platform/contextview/browser/contextView","vs/platform/markers/common/markers","vs/platform/quickinput/common/quickInput","vs/editor/common/languages/modesRegistry","vs/platform/storage/common/storage","vs/base/common/linkedList","vs/base/common/path","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsTree/length","vs/editor/contrib/hover/browser/hoverTypes","vs/base/browser/ui/actionbar/actionbar","vs/editor/common/services/editorWorker","vs/platform/keybinding/common/keybindingsRegistry","vs/platform/telemetry/common/telemetry","vs/base/common/functional","vs/base/browser/event","vs/editor/common/core/stringBuilder","vs/editor/common/textModelEvents","vs/base/common/decorators","vs/base/common/keybindings","vs/base/common/iconLabels",
"vs/base/browser/globalMouseMoveMonitor","vs/editor/common/core/characterClassifier","vs/editor/common/commands/replaceCommand","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsTree/smallImmutableSet","vs/editor/browser/view/dynamicViewOverlay","vs/editor/standalone/common/standaloneTheme","vs/platform/clipboard/common/clipboardService","vs/platform/configuration/common/configurationRegistry","vs/editor/contrib/markdownRenderer/browser/markdownRenderer","vs/platform/quickinput/common/quickAccess","vs/editor/contrib/editorState/browser/editorState","vs/editor/contrib/suggest/browser/suggest","vs/editor/contrib/peekView/browser/peekView","vs/base/browser/ui/tree/tree","vs/base/common/buffer","vs/base/common/numbers","vs/base/common/hash","vs/base/browser/ui/iconLabel/iconLabels","vs/base/browser/ui/sash/sash","vs/base/browser/ui/list/listWidget","vs/editor/browser/view/renderingContext","vs/editor/common/core/eolCounter","vs/editor/common/core/wordCharacterClassifier","vs/editor/common/core/wordHelper","vs/editor/common/languages/languageConfiguration","vs/editor/common/languages/supports","vs/editor/common/languages/nullTokenize","vs/editor/common/viewEventHandler","vs/editor/common/viewLayout/viewLineRenderer","vs/editor/contrib/snippet/browser/snippetParser","vs/base/browser/ui/actionbar/actionViewItems","vs/editor/contrib/gotoSymbol/browser/referencesModel","vs/platform/dialogs/common/dialogs","vs/platform/label/common/label","vs/platform/layout/browser/layoutService","vs/editor/browser/editorDom","vs/platform/theme/common/styler","vs/platform/theme/common/theme","vs/base/common/idGenerator","vs/base/common/lazy","vs/base/common/mime","vs/base/common/range","vs/base/common/scrollable","vs/base/common/diff/diff","vs/base/common/uint","vs/base/browser/ui/codicons/codiconStyles","vs/base/browser/ui/mouseCursor/mouseCursor","vs/css!vs/base/parts/quickinput/browser/media/quickInput","vs/editor/browser/stableEditorScroll","vs/editor/common/config/editorZoom","vs/editor/common/core/textModelDefaults","vs/editor/common/editorCommon","vs/editor/browser/editorBrowser","vs/editor/common/cursor/cursorWordOperations","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsTree/ast","vs/editor/common/model/textModelSearch","vs/editor/common/viewLayout/lineDecorations","vs/editor/contrib/codeAction/browser/types","vs/editor/common/services/textResourceConfiguration","vs/platform/instantiation/common/serviceCollection","vs/editor/browser/coreCommands","vs/editor/contrib/codeAction/browser/codeAction","vs/editor/contrib/message/browser/messageController","vs/platform/list/browser/listService","vs/platform/undoRedo/common/undoRedo","vs/editor/browser/widget/codeEditorWidget","vs/editor/browser/widget/embeddedCodeEditorWidget","vs/editor/contrib/find/browser/findModel","vs/base/browser/dnd","vs/base/browser/canIUse","vs/base/common/extpath","vs/base/browser/ui/tree/indexTreeModel","vs/base/browser/ui/tree/objectTreeModel","vs/base/browser/formattedTextRenderer","vs/base/browser/ui/scrollbar/scrollbarArrow","vs/base/common/labels","vs/base/browser/ui/checkbox/checkbox","vs/base/browser/ui/list/listView","vs/editor/common/config/fontInfo","vs/editor/common/core/indentation","vs/editor/browser/controller/textAreaInput","vs/editor/browser/view/viewLayer","vs/editor/common/cursor/cursorMoveOperations","vs/editor/common/cursor/cursorDeleteOperations","vs/editor/common/cursor/cursorMoveCommands","vs/editor/common/languages/supports/richEditBrackets","vs/editor/common/model/utils","vs/editor/comm
on/standalone/standaloneEnums","vs/editor/common/textModelGuides","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsTree/tokenizer","vs/editor/browser/viewParts/glyphMargin/glyphMargin","vs/editor/common/viewEvents","vs/editor/common/viewModelEventDispatcher","vs/editor/contrib/folding/browser/foldingRanges","vs/editor/contrib/gotoSymbol/browser/link/clickLinkGesture","vs/editor/contrib/inlineCompletions/browser/ghostText","vs/editor/contrib/inlineCompletions/browser/inlineCompletionToGhostText","vs/base/browser/ui/iconLabel/iconLabel","vs/base/browser/ui/tree/abstractTree","vs/base/browser/ui/inputbox/inputBox","vs/base/common/keybindingLabels","vs/platform/instantiation/common/descriptors","vs/editor/browser/services/bulkEditService","vs/editor/common/services/markerDecorations","vs/editor/common/commands/shiftCommand","vs/editor/common/cursor/cursorTypeOperations","vs/editor/contrib/parameterHints/browser/provideSignatureHelp","vs/editor/contrib/documentSymbols/browser/outlineModel","vs/platform/jsonschemas/common/jsonContributionRegistry","vs/editor/contrib/hover/browser/markdownHoverParticipant","vs/editor/contrib/inlineCompletions/browser/inlineCompletionsModel","vs/platform/actions/browser/menuEntryActionViewItem","vs/editor/contrib/gotoSymbol/browser/goToCommands","vs/platform/workspace/common/workspace","vs/editor/contrib/snippet/browser/snippetController2","vs/editor/standalone/browser/standaloneServices","vs/base/browser/iframe","vs/base/browser/ui/scrollbar/scrollbarState","vs/base/common/assert","vs/base/common/collections","vs/base/common/glob","vs/base/common/marshalling","vs/base/browser/ui/highlightedlabel/highlightedLabel","vs/base/browser/ui/scrollbar/abstractScrollbar","vs/base/common/worker/simpleWorker","vs/base/parts/quickinput/common/quickInput","vs/css!vs/base/browser/ui/actionbar/actionbar","vs/base/browser/ui/contextview/contextview","vs/base/browser/ui/countBadge/countBadge","vs/css!vs/base/browser/ui/dropdown/dropdown","vs/css!vs/base/browser/ui/findinput/findInput","vs/css!vs/base/browser/ui/list/list","vs/base/browser/ui/hover/hoverWidget","vs/base/browser/ui/splitview/splitview","vs/base/parts/quickinput/browser/quickInputUtils","vs/editor/browser/config/elementSizeObserver","vs/editor/browser/config/tabFocus","vs/editor/browser/view/viewUserInputEvents","vs/editor/browser/viewParts/minimap/minimapCharSheet","vs/editor/browser/controller/textAreaState","vs/editor/browser/widget/diffNavigator","vs/editor/common/core/rgba","vs/editor/common/core/textChange","vs/editor/common/cursor/cursorAtomicMoveOperations","vs/editor/common/editorAction","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsTree/beforeEditPositionMapper","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsTree/parser","vs/editor/common/model/prefixSumComputer","vs/editor/common/model/pieceTreeTextBuffer/pieceTreeBase","vs/editor/common/model/pieceTreeTextBuffer/pieceTreeTextBuffer","vs/editor/common/modelLineProjectionData","vs/editor/common/services/unicodeTextModelHighlighter","vs/editor/common/model/guidesTextModelPart","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsTree/brackets","vs/editor/common/services/editorBaseApi","vs/editor/common/languages/textToHtmlTokenizer","vs/editor/browser/viewParts/margin/margin","vs/editor/common/viewModel/minimapTokensColorTracker","vs/editor/common/viewModel/overviewZoneManager","vs/editor/contrib/comment/browser/blockCommentCommand","vs/editor/contrib/folding/browser/syntaxRangeProvider","vs/editor/contrib/
format/browser/formattingEdit","vs/editor/contrib/hover/browser/hoverOperation","vs/editor/contrib/indentation/browser/indentUtils","vs/editor/contrib/inlayHints/browser/inlayHints","vs/editor/contrib/inlineCompletions/browser/consts","vs/editor/contrib/smartSelect/browser/bracketSelections","vs/editor/contrib/suggest/browser/resizable","vs/editor/standalone/common/monarch/monarchCommon","vs/editor/standalone/common/monarch/monarchLexer","vs/base/browser/ui/findinput/findInputCheckboxes","vs/base/browser/ui/tree/objectTree","vs/editor/browser/config/fontMeasurements","vs/editor/common/viewModel/viewModelDecorations","vs/editor/common/model/editStack","vs/platform/files/common/files","vs/editor/common/services/getSemanticTokens","vs/editor/contrib/codelens/browser/codelens","vs/editor/contrib/colorPicker/browser/color","vs/editor/common/cursor/cursor","vs/platform/contextkey/common/contextkeys","vs/platform/keybinding/common/resolvedKeybindingItem","vs/editor/standalone/browser/standaloneLayoutService","vs/editor/browser/services/editorWorkerService","vs/editor/contrib/suggest/browser/suggestWidgetDetails","vs/platform/history/browser/contextScopedHistoryWidget","vs/editor/browser/viewParts/lines/viewLine","vs/editor/browser/controller/mouseTarget","vs/editor/browser/viewParts/lineNumbers/lineNumbers","vs/editor/common/services/semanticTokensProviderStyling","vs/editor/contrib/quickAccess/browser/editorNavigationQuickAccess","vs/editor/contrib/symbolIcons/browser/symbolIcons","vs/editor/standalone/browser/standaloneCodeEditorService","vs/editor/contrib/format/browser/format","vs/editor/contrib/gotoSymbol/browser/goToSymbol","vs/editor/contrib/hover/browser/getHover","vs/editor/contrib/codeAction/browser/codeActionCommands","vs/editor/contrib/toggleTabFocusMode/browser/toggleTabFocusMode","vs/editor/contrib/wordOperations/browser/wordOperations","vs/editor/common/services/modelService","vs/editor/browser/widget/diffEditorWidget","vs/editor/contrib/colorPicker/browser/colorDetector","vs/editor/contrib/find/browser/findController","vs/editor/contrib/gotoError/browser/gotoError","vs/editor/contrib/gotoSymbol/browser/peek/referencesController","vs/editor/contrib/gotoSymbol/browser/link/goToDefinitionAtPosition","vs/editor/contrib/hover/browser/hover","vs/editor/contrib/inlayHints/browser/inlayHintsController","vs/editor/contrib/snippet/browser/snippetSession","vs/editor/contrib/suggest/browser/suggestController","vs/editor/contrib/inlineCompletions/browser/ghostTextController","vs/platform/workspace/common/workspaceTrust","vs/base/browser/ui/list/list","vs/base/browser/ui/list/splice","vs/base/common/cache","vs/base/common/diff/diffChange","vs/base/common/marked/marked","vs/base/common/navigator","vs/base/common/history","vs/base/common/process","vs/base/browser/ui/list/rangeMap","vs/base/browser/ui/scrollbar/scrollbarVisibilityController","vs/base/common/comparers","vs/base/browser/ui/tree/compressedObjectTreeModel","vs/base/common/fuzzyScorer","vs/base/common/search","vs/base/browser/ui/list/rowCache","vs/base/browser/ui/scrollbar/horizontalScrollbar","vs/base/browser/ui/scrollbar/verticalScrollbar","vs/base/browser/markdownRenderer","vs/base/common/uuid","vs/base/browser/defaultWorkerFactory","vs/base/parts/storage/common/storage","vs/css!vs/base/browser/ui/aria/aria","vs/css!vs/base/browser/ui/button/button","vs/base/browser/ui/button/button","vs/css!vs/base/browser/ui/checkbox/checkbox","vs/css!vs/base/browser/ui/codicons/codicon/codicon","vs/css!vs/base/browser/ui/codicons/codicon/codicon-m
odifiers","vs/css!vs/base/browser/ui/contextview/contextview","vs/css!vs/base/browser/ui/countBadge/countBadge","vs/css!vs/base/browser/ui/hover/hover","vs/css!vs/base/browser/ui/iconLabel/iconlabel","vs/css!vs/base/browser/ui/inputbox/inputBox","vs/css!vs/base/browser/ui/keybindingLabel/keybindingLabel","vs/css!vs/base/browser/ui/mouseCursor/mouseCursor","vs/css!vs/base/browser/ui/progressbar/progressbar","vs/base/browser/ui/progressbar/progressbar","vs/css!vs/base/browser/ui/sash/sash","vs/css!vs/base/browser/ui/scrollbar/media/scrollbars","vs/base/browser/ui/list/listPaging","vs/css!vs/base/browser/ui/splitview/splitview","vs/css!vs/base/browser/ui/table/table","vs/base/browser/ui/table/tableWidget","vs/css!vs/base/browser/ui/tree/media/tree","vs/css!vs/editor/browser/controller/textAreaHandler","vs/css!vs/editor/browser/viewParts/currentLineHighlight/currentLineHighlight","vs/css!vs/editor/browser/viewParts/decorations/decorations","vs/css!vs/editor/browser/viewParts/glyphMargin/glyphMargin","vs/css!vs/editor/browser/viewParts/indentGuides/indentGuides","vs/css!vs/editor/browser/viewParts/lineNumbers/lineNumbers","vs/css!vs/editor/browser/viewParts/lines/viewLines","vs/css!vs/editor/browser/viewParts/linesDecorations/linesDecorations","vs/css!vs/editor/browser/viewParts/marginDecorations/marginDecorations","vs/css!vs/editor/browser/viewParts/minimap/minimap","vs/css!vs/editor/browser/viewParts/overlayWidgets/overlayWidgets","vs/css!vs/editor/browser/viewParts/rulers/rulers","vs/css!vs/editor/browser/viewParts/scrollDecoration/scrollDecoration","vs/css!vs/editor/browser/viewParts/selections/selections","vs/css!vs/editor/browser/viewParts/viewCursors/viewCursors","vs/css!vs/editor/browser/widget/media/diffEditor","vs/css!vs/editor/browser/widget/media/diffReview","vs/css!vs/editor/browser/widget/media/editor","vs/css!vs/editor/contrib/anchorSelect/browser/anchorSelect","vs/css!vs/editor/contrib/bracketMatching/browser/bracketMatching","vs/css!vs/editor/contrib/codeAction/browser/lightBulbWidget","vs/css!vs/editor/contrib/codelens/browser/codelensWidget","vs/css!vs/editor/contrib/colorPicker/browser/colorPicker","vs/css!vs/editor/contrib/dnd/browser/dnd","vs/css!vs/editor/contrib/find/browser/findWidget","vs/css!vs/editor/contrib/folding/browser/folding","vs/css!vs/editor/contrib/gotoError/browser/media/gotoErrorWidget","vs/css!vs/editor/contrib/gotoSymbol/browser/link/goToDefinitionAtPosition","vs/css!vs/editor/contrib/gotoSymbol/browser/peek/referencesWidget","vs/css!vs/editor/contrib/inlineCompletions/browser/ghostText","vs/css!vs/editor/contrib/links/browser/links","vs/css!vs/editor/contrib/message/browser/messageController","vs/css!vs/editor/contrib/parameterHints/browser/parameterHints","vs/css!vs/editor/contrib/peekView/browser/media/peekViewWidget","vs/css!vs/editor/contrib/rename/browser/renameInputField","vs/css!vs/editor/contrib/snippet/browser/snippetSession","vs/css!vs/editor/contrib/suggest/browser/media/suggest","vs/css!vs/editor/contrib/unicodeHighlighter/browser/bannerController","vs/css!vs/editor/contrib/unicodeHighlighter/browser/unicodeHighlighter","vs/css!vs/editor/contrib/zoneWidget/browser/zoneWidget","vs/css!vs/editor/standalone/browser/accessibilityHelp/accessibilityHelp","vs/css!vs/editor/standalone/browser/iPadShowKeyboard/iPadShowKeyboard","vs/css!vs/editor/standalone/browser/inspectTokens/inspectTokens","vs/css!vs/editor/standalone/browser/quickInput/standaloneQuickInput","vs/css!vs/editor/standalone/browser/standalone-tokens","vs/css!vs/platform/actions/browse
r/menuEntryActionViewItem","vs/css!vs/platform/contextview/browser/contextMenuHandler","vs/editor/browser/config/migrateOptions","vs/editor/browser/viewParts/lines/rangeUtil","vs/editor/browser/viewParts/minimap/minimapCharRenderer","vs/editor/browser/viewParts/minimap/minimapPreBaked","vs/editor/browser/viewParts/minimap/minimapCharRendererFactory","vs/editor/common/commands/trimTrailingWhitespaceCommand","vs/editor/common/commands/surroundSelectionCommand","vs/editor/common/cursor/cursorContext","vs/editor/common/diff/diffComputer","vs/editor/common/editorTheme","vs/editor/common/languageSelector","vs/editor/common/languages/linkComputer","vs/editor/common/cursor/cursorColumnSelection","vs/editor/common/cursor/oneCursor","vs/editor/common/cursor/cursorCollection","vs/editor/common/languages/supports/characterPair","vs/editor/common/languages/supports/indentRules","vs/editor/common/languages/supports/inplaceReplaceSupport","vs/editor/common/languages/supports/onEnter","vs/editor/common/languages/supports/electricCharacter","vs/editor/common/languages/supports/tokenization","vs/editor/common/languageFeatureRegistry","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsTree/nodeReader","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsTree/concat23Trees","vs/editor/common/model/indentationGuesser","vs/editor/common/model/intervalTree","vs/editor/common/model/pieceTreeTextBuffer/rbTreeBase","vs/editor/common/model/mirrorTextModel","vs/editor/common/model/textModelPart","vs/editor/common/model/pieceTreeTextBuffer/pieceTreeTextBufferBuilder","vs/editor/common/services/languagesAssociations","vs/editor/common/services/semanticTokensDto","vs/editor/common/textModelBracketPairs","vs/editor/common/tokenizationRegistry","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsTree/bracketPairsTree","vs/editor/common/model/bracketPairsTextModelPart/bracketPairsImpl","vs/editor/common/model/bracketPairsTextModelPart/fixBrackets","vs/editor/common/services/editorSimpleWorker","vs/editor/common/tokens/contiguousMultilineTokens","vs/editor/common/tokens/contiguousMultilineTokensBuilder","vs/editor/common/model/textModelTokens","vs/editor/common/tokens/contiguousTokensEditing","vs/editor/common/tokens/contiguousTokensStore","vs/editor/common/tokens/sparseMultilineTokens","vs/editor/common/tokens/sparseTokensStore","vs/editor/browser/viewParts/contentWidgets/contentWidgets","vs/editor/browser/viewParts/decorations/decorations","vs/editor/browser/viewParts/linesDecorations/linesDecorations","vs/editor/browser/viewParts/marginDecorations/marginDecorations","vs/editor/browser/viewParts/overlayWidgets/overlayWidgets","vs/editor/browser/viewParts/viewZones/viewZones","vs/editor/common/viewLayout/linesLayout","vs/editor/common/viewLayout/viewLinesViewportData","vs/editor/common/viewModel/modelLineProjection","vs/editor/common/viewModel/monospaceLineBreaksComputer","vs/editor/browser/viewParts/overviewRuler/overviewRuler","vs/editor/common/viewModel/viewContext","vs/editor/common/viewLayout/viewLayout","vs/editor/contrib/caretOperations/browser/moveCaretCommand","vs/editor/contrib/colorPicker/browser/colorPickerModel","vs/editor/contrib/dnd/browser/dragAndDropCommand","vs/editor/contrib/find/browser/replaceAllCommand","vs/editor/contrib/find/browser/replacePattern","vs/editor/contrib/folding/browser/foldingModel","vs/editor/contrib/folding/browser/hiddenRangeModel","vs/editor/contrib/folding/browser/indentRangeProvider","vs/editor/contrib/folding/browser/intializingRangeProvider","vs/edi
tor/contrib/inPlaceReplace/browser/inPlaceReplaceCommand","vs/editor/contrib/inlineCompletions/browser/utils","vs/editor/contrib/linesOperations/browser/copyLinesCommand","vs/editor/contrib/linesOperations/browser/sortLinesCommand","vs/editor/contrib/smartSelect/browser/wordSelections","vs/editor/contrib/suggest/browser/completionModel","vs/editor/contrib/suggest/browser/suggestCommitCharacters","vs/editor/contrib/suggest/browser/suggestOvertypingCapturer","vs/editor/contrib/suggest/browser/wordDistance","vs/editor/standalone/common/monarch/monarchCompile","vs/editor/standalone/browser/colorizer","vs/nls!vs/base/browser/ui/actionbar/actionViewItems","vs/nls!vs/base/browser/ui/findinput/findInput","vs/nls!vs/base/browser/ui/findinput/findInputCheckboxes","vs/nls!vs/base/browser/ui/findinput/replaceInput","vs/nls!vs/base/browser/ui/iconLabel/iconLabelHover","vs/base/browser/ui/iconLabel/iconLabelHover","vs/nls!vs/base/browser/ui/inputbox/inputBox","vs/nls!vs/base/browser/ui/keybindingLabel/keybindingLabel","vs/nls!vs/base/browser/ui/tree/abstractTree","vs/base/browser/ui/tree/dataTree","vs/base/browser/ui/tree/asyncDataTree","vs/nls!vs/base/common/actions","vs/base/browser/ui/dropdown/dropdown","vs/base/browser/ui/dropdown/dropdownActionViewItem","vs/base/browser/ui/findinput/findInput","vs/base/browser/ui/findinput/replaceInput","vs/base/browser/ui/menu/menu","vs/base/parts/quickinput/browser/quickInputBox","vs/nls!vs/base/common/errorMessage","vs/base/common/errorMessage","vs/nls!vs/base/common/keybindingLabels","vs/base/browser/ui/keybindingLabel/keybindingLabel","vs/nls!vs/base/parts/quickinput/browser/quickInput","vs/nls!vs/base/parts/quickinput/browser/quickInputList","vs/base/parts/quickinput/browser/quickInputList","vs/base/parts/quickinput/browser/quickInput","vs/nls!vs/editor/browser/controller/textAreaHandler","vs/nls!vs/editor/browser/coreCommands","vs/nls!vs/editor/browser/editorExtensions","vs/nls!vs/editor/browser/widget/codeEditorWidget","vs/nls!vs/editor/browser/widget/diffEditorWidget","vs/nls!vs/editor/browser/widget/diffReview","vs/nls!vs/editor/browser/widget/inlineDiffMargin","vs/editor/browser/widget/inlineDiffMargin","vs/nls!vs/editor/common/config/editorConfigurationSchema","vs/nls!vs/editor/common/config/editorOptions","vs/editor/browser/config/charWidthReader","vs/editor/browser/view/domLineBreaksComputer","vs/editor/browser/view/viewOverlays","vs/editor/browser/viewParts/viewCursors/viewCursor","vs/nls!vs/editor/common/core/editorColorRegistry","vs/nls!vs/editor/common/editorContextKeys","vs/nls!vs/editor/common/languages/modesRegistry","vs/nls!vs/editor/common/model/editStack","vs/nls!vs/editor/common/standaloneStrings","vs/nls!vs/editor/contrib/anchorSelect/browser/anchorSelect","vs/nls!vs/editor/contrib/bracketMatching/browser/bracketMatching","vs/nls!vs/editor/contrib/caretOperations/browser/caretOperations","vs/nls!vs/editor/contrib/caretOperations/browser/transpose","vs/nls!vs/editor/contrib/clipboard/browser/clipboard","vs/nls!vs/editor/contrib/codeAction/browser/codeActionCommands","vs/nls!vs/editor/contrib/codeAction/browser/lightBulbWidget","vs/nls!vs/editor/contrib/codelens/browser/codelensController","vs/nls!vs/editor/contrib/colorPicker/browser/colorPickerWidget","vs/nls!vs/editor/contrib/comment/browser/comment","vs/nls!vs/editor/contrib/contextmenu/browser/contextmenu","vs/nls!vs/editor/contrib/cursorUndo/browser/cursorUndo","vs/nls!vs/editor/contrib/editorState/browser/keybindingCancellation","vs/nls!vs/editor/contrib/find/browser/findController","v
s/nls!vs/editor/contrib/find/browser/findWidget","vs/nls!vs/editor/contrib/folding/browser/folding","vs/nls!vs/editor/contrib/folding/browser/foldingDecorations","vs/nls!vs/editor/contrib/fontZoom/browser/fontZoom","vs/nls!vs/editor/contrib/format/browser/format","vs/nls!vs/editor/contrib/format/browser/formatActions","vs/nls!vs/editor/contrib/gotoError/browser/gotoError","vs/nls!vs/editor/contrib/gotoError/browser/gotoErrorWidget","vs/nls!vs/editor/contrib/gotoSymbol/browser/goToCommands","vs/nls!vs/editor/contrib/gotoSymbol/browser/link/goToDefinitionAtPosition","vs/nls!vs/editor/contrib/gotoSymbol/browser/peek/referencesController","vs/nls!vs/editor/contrib/gotoSymbol/browser/peek/referencesTree","vs/nls!vs/editor/contrib/gotoSymbol/browser/peek/referencesWidget","vs/nls!vs/editor/contrib/gotoSymbol/browser/referencesModel","vs/nls!vs/editor/contrib/gotoSymbol/browser/symbolNavigation","vs/nls!vs/editor/contrib/hover/browser/hover","vs/nls!vs/editor/contrib/hover/browser/markdownHoverParticipant","vs/nls!vs/editor/contrib/hover/browser/markerHoverParticipant","vs/nls!vs/editor/contrib/inPlaceReplace/browser/inPlaceReplace","vs/nls!vs/editor/contrib/indentation/browser/indentation","vs/nls!vs/editor/contrib/inlayHints/browser/inlayHintsHover","vs/nls!vs/editor/contrib/inlineCompletions/browser/ghostTextController","vs/nls!vs/editor/contrib/inlineCompletions/browser/inlineCompletionsHoverParticipant","vs/nls!vs/editor/contrib/lineSelection/browser/lineSelection","vs/nls!vs/editor/contrib/linesOperations/browser/linesOperations","vs/nls!vs/editor/contrib/linkedEditing/browser/linkedEditing","vs/nls!vs/editor/contrib/links/browser/links","vs/nls!vs/editor/contrib/message/browser/messageController","vs/nls!vs/editor/contrib/multicursor/browser/multicursor","vs/nls!vs/editor/contrib/parameterHints/browser/parameterHints","vs/nls!vs/editor/contrib/parameterHints/browser/parameterHintsWidget","vs/nls!vs/editor/contrib/peekView/browser/peekView","vs/nls!vs/editor/contrib/quickAccess/browser/gotoLineQuickAccess","vs/nls!vs/editor/contrib/quickAccess/browser/gotoSymbolQuickAccess","vs/nls!vs/editor/contrib/rename/browser/rename","vs/nls!vs/editor/contrib/rename/browser/renameInputField","vs/nls!vs/editor/contrib/smartSelect/browser/smartSelect","vs/nls!vs/editor/contrib/snippet/browser/snippetController2","vs/nls!vs/editor/contrib/snippet/browser/snippetVariables","vs/nls!vs/editor/contrib/suggest/browser/suggest","vs/nls!vs/editor/contrib/suggest/browser/suggestController","vs/nls!vs/editor/contrib/suggest/browser/suggestWidget","vs/nls!vs/editor/contrib/suggest/browser/suggestWidgetDetails","vs/nls!vs/editor/contrib/suggest/browser/suggestWidgetRenderer","vs/nls!vs/editor/contrib/suggest/browser/suggestWidgetStatus","vs/nls!vs/editor/contrib/symbolIcons/browser/symbolIcons","vs/nls!vs/editor/contrib/toggleTabFocusMode/browser/toggleTabFocusMode","vs/nls!vs/editor/contrib/tokenization/browser/tokenization","vs/nls!vs/editor/contrib/unicodeHighlighter/browser/unicodeHighlighter","vs/nls!vs/editor/contrib/unusualLineTerminators/browser/unusualLineTerminators","vs/nls!vs/editor/contrib/wordHighlighter/browser/wordHighlighter","vs/nls!vs/editor/contrib/wordOperations/browser/wordOperations","vs/nls!vs/platform/actions/browser/menuEntryActionViewItem","vs/nls!vs/platform/configuration/common/configurationRegistry","vs/nls!vs/platform/contextkey/browser/contextKeyService","vs/nls!vs/platform/contextkey/common/contextkeys","vs/nls!vs/platform/history/browser/contextScopedHistoryWidget","vs/nls!vs/platfor
m/keybinding/common/abstractKeybindingService","vs/nls!vs/platform/list/browser/listService","vs/nls!vs/platform/markers/common/markers","vs/nls!vs/platform/quickinput/browser/commandsQuickAccess","vs/nls!vs/platform/quickinput/browser/helpQuickAccess","vs/nls!vs/platform/theme/common/colorRegistry","vs/nls!vs/platform/theme/common/iconRegistry","vs/nls!vs/platform/undoRedo/common/undoRedoService","vs/nls!vs/platform/workspace/common/workspace","vs/platform/editor/common/editor","vs/platform/extensions/common/extensions","vs/platform/history/browser/historyWidgetKeybindingHint","vs/platform/instantiation/common/graph","vs/editor/common/services/languageFeaturesService","vs/editor/contrib/links/browser/getLinks","vs/editor/contrib/comment/browser/lineCommentCommand","vs/editor/contrib/linesOperations/browser/moveLinesCommand","vs/editor/contrib/parameterHints/browser/parameterHintsModel","vs/editor/contrib/suggest/browser/suggestAlternatives","vs/editor/contrib/suggest/browser/wordContextKey","vs/editor/browser/config/editorConfiguration","vs/platform/accessibility/browser/accessibilityService","vs/platform/contextkey/browser/contextKeyService","vs/platform/instantiation/common/instantiationService","vs/platform/keybinding/common/abstractKeybindingService","vs/platform/keybinding/common/baseResolvedKeybinding","vs/platform/keybinding/common/keybindingResolver","vs/platform/keybinding/common/usLayoutResolvedKeybinding","vs/platform/contextview/browser/contextViewService","vs/editor/browser/services/webWorker","vs/editor/contrib/documentSymbols/browser/documentSymbols","vs/platform/clipboard/browser/clipboardService","vs/editor/contrib/gotoError/browser/markerNavigationService","vs/platform/markers/common/markerService","vs/editor/browser/services/openerService","vs/platform/quickinput/browser/pickerQuickAccess","vs/editor/common/config/editorConfigurationSchema","vs/editor/common/services/getIconClasses","vs/editor/common/services/languagesRegistry","vs/editor/common/services/languageService","vs/editor/contrib/hover/browser/marginHover","vs/platform/configuration/common/configurationModels","vs/platform/quickinput/browser/helpQuickAccess","vs/editor/standalone/browser/quickAccess/standaloneHelpQuickAccess","vs/platform/quickinput/browser/quickAccess","vs/editor/contrib/codelens/browser/codeLensCache","vs/editor/contrib/suggest/browser/suggestMemory","vs/platform/quickinput/browser/commandsQuickAccess","vs/editor/contrib/quickAccess/browser/commandsQuickAccess","vs/platform/contextview/browser/contextMenuHandler","vs/editor/browser/controller/mouseHandler","vs/editor/browser/controller/pointerHandler","vs/editor/browser/viewParts/lines/viewLines","vs/editor/browser/services/abstractCodeEditorService","vs/editor/browser/viewParts/editorScrollbar/editorScrollbar","vs/editor/browser/viewParts/minimap/minimap","vs/editor/browser/viewParts/scrollDecoration/scrollDecoration","vs/editor/browser/viewParts/selections/selections","vs/editor/browser/viewParts/currentLineHighlight/currentLineHighlight","vs/editor/browser/viewParts/indentGuides/indentGuides","vs/editor/browser/controller/textAreaHandler","vs/editor/browser/viewParts/overviewRuler/decorationsOverviewRuler","vs/editor/browser/viewParts/rulers/rulers","vs/editor/browser/viewParts/viewCursors/viewCursors","vs/editor/common/model/bracketPairsTextModelPart/colorizedBracketPairsDecorationProvider","vs/editor/common/services/markerDecorationsService","vs/editor/contrib/codeAction/browser/lightBulbWidget","vs/editor/contrib/colorPicker/browser/col
orPickerWidget","vs/editor/contrib/gotoSymbol/browser/peek/referencesTree","vs/editor/contrib/inlineCompletions/browser/ghostTextWidget","vs/editor/contrib/quickAccess/browser/gotoLineQuickAccess","vs/editor/contrib/quickAccess/browser/gotoSymbolQuickAccess","vs/editor/contrib/rename/browser/renameInputField","vs/editor/standalone/common/themes","vs/editor/browser/services/markerDecorations","vs/editor/browser/view/viewController","vs/editor/browser/view","vs/editor/contrib/anchorSelect/browser/anchorSelect","vs/editor/contrib/caretOperations/browser/caretOperations","vs/editor/contrib/caretOperations/browser/transpose","vs/editor/contrib/clipboard/browser/clipboard","vs/editor/contrib/comment/browser/comment","vs/editor/contrib/contextmenu/browser/contextmenu","vs/editor/contrib/cursorUndo/browser/cursorUndo","vs/editor/contrib/editorState/browser/keybindingCancellation","vs/editor/contrib/codeAction/browser/codeActionMenu","vs/editor/contrib/codeAction/browser/codeActionModel","vs/editor/contrib/fontZoom/browser/fontZoom","vs/editor/contrib/format/browser/formatActions","vs/editor/contrib/gotoSymbol/browser/symbolNavigation","vs/editor/contrib/indentation/browser/indentation","vs/editor/contrib/lineSelection/browser/lineSelection","vs/editor/contrib/linesOperations/browser/linesOperations","vs/editor/contrib/codeAction/browser/codeActionUi","vs/editor/contrib/codeAction/browser/codeActionContributions","vs/editor/contrib/rename/browser/rename","vs/editor/contrib/smartSelect/browser/smartSelect","vs/editor/contrib/tokenization/browser/tokenization","vs/editor/contrib/unusualLineTerminators/browser/unusualLineTerminators","vs/editor/contrib/wordPartOperations/browser/wordPartOperations","vs/editor/standalone/browser/accessibilityHelp/accessibilityHelp","vs/editor/standalone/browser/iPadShowKeyboard/iPadShowKeyboard","vs/editor/standalone/browser/inspectTokens/inspectTokens","vs/editor/standalone/browser/quickAccess/standaloneCommandsQuickAccess","vs/editor/standalone/browser/quickAccess/standaloneGotoLineQuickAccess","vs/editor/standalone/browser/quickAccess/standaloneGotoSymbolQuickAccess","vs/editor/standalone/browser/toggleHighContrast/toggleHighContrast","vs/editor/contrib/suggest/browser/suggestWidgetStatus","vs/platform/actions/common/menuService","vs/platform/contextview/browser/contextMenuService","vs/platform/opener/browser/link","vs/platform/quickinput/browser/quickInput","vs/editor/standalone/browser/quickInput/standaloneQuickInputService","vs/platform/severityIcon/common/severityIcon","vs/editor/browser/widget/diffReview","vs/editor/contrib/parameterHints/browser/parameterHintsWidget","vs/editor/contrib/parameterHints/browser/parameterHints","vs/editor/contrib/suggest/browser/suggestWidgetRenderer","vs/editor/contrib/unicodeHighlighter/browser/bannerController","vs/platform/theme/browser/iconsStyleSheet","vs/editor/standalone/browser/standaloneThemeService","vs/editor/common/viewModel/viewModelLines","vs/editor/common/viewModel/viewModelImpl","vs/editor/contrib/bracketMatching/browser/bracketMatching","vs/editor/contrib/codelens/browser/codelensWidget","vs/editor/contrib/codelens/browser/codelensController","vs/editor/contrib/colorPicker/browser/colorHoverParticipant","vs/editor/contrib/dnd/browser/dnd","vs/editor/contrib/find/browser/findDecorations","vs/editor/contrib/find/browser/findOptionsWidget","vs/editor/contrib/find/browser/findState","vs/editor/contrib/find/browser/findWidget","vs/editor/contrib/folding/browser/foldingDecorations","vs/editor/contrib/folding/browser/fol
ding","vs/editor/contrib/hover/browser/contentHover","vs/editor/contrib/inPlaceReplace/browser/inPlaceReplace","vs/editor/contrib/linkedEditing/browser/linkedEditing","vs/editor/contrib/links/browser/links","vs/editor/contrib/multicursor/browser/multicursor","vs/editor/contrib/suggest/browser/suggestWidget","vs/editor/contrib/viewportSemanticTokens/browser/viewportSemanticTokens","vs/editor/contrib/wordHighlighter/browser/wordHighlighter","vs/editor/contrib/zoneWidget/browser/zoneWidget","vs/editor/contrib/gotoError/browser/gotoErrorWidget","vs/editor/contrib/gotoSymbol/browser/peek/referencesWidget","vs/editor/contrib/hover/browser/markerHoverParticipant","vs/editor/contrib/colorPicker/browser/colorContributions","vs/editor/contrib/inlayHints/browser/inlayHintsLocations","vs/editor/contrib/inlayHints/browser/inlayHintsHover","vs/editor/contrib/inlayHints/browser/inlayHintsContribution","vs/editor/standalone/browser/referenceSearch/standaloneReferenceSearch","vs/platform/undoRedo/common/undoRedoService","vs/editor/contrib/snippet/browser/snippetVariables","vs/editor/contrib/suggest/browser/suggestModel","vs/editor/contrib/inlineCompletions/browser/suggestWidgetInlineCompletionProvider","vs/editor/contrib/inlineCompletions/browser/suggestWidgetPreviewModel","vs/editor/contrib/inlineCompletions/browser/ghostTextModel","vs/editor/contrib/inlineCompletions/browser/inlineCompletionsHoverParticipant","vs/editor/contrib/inlineCompletions/browser/inlineCompletionsContribution","vs/editor/contrib/unicodeHighlighter/browser/unicodeHighlighter","vs/editor/editor.all","vs/editor/standalone/browser/standaloneCodeEditor","vs/editor/standalone/browser/standaloneEditor","vs/editor/standalone/browser/standaloneLanguages","vs/editor/editor.api","vs/base/browser/dompurify/dompurify","vs/editor/edcore.main"],ie=function(q){for(var e=[],L=0,m=q.length;L=0)},b}();function L(b,C,u){var g;return C.length===0?g=b:g=b.replace(/\{(\d+)\}/g,function(t,n){var i=n[0],o=C[i],c=t;return typeof o=="string"?c=o:(typeof o=="number"||typeof o=="boolean"||o===void 0||o===null)&&(c=String(o)),c}),u.isPseudo&&(g="\uFF3B"+g.replace(/[aouei]/g,"$&$&")+"\uFF3D"),g}function m(b,C){var u=b[C];return u||(u=b["*"],u)?u:null}function k(b,C,u){for(var g=[],t=3;t1?L-1:0),k=1;k/gm),gn=ft(/^data-[\-\w.\u00B7-\uFFFF]/),fn=ft(/^aria-[\-\w]+$/),mn=ft(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),pn=ft(/^(?:\w+script|data):/i),Cn=ft(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),Dt=typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?function(q){return typeof q}:function(q){return q&&typeof Symbol=="function"&&q.constructor===Symbol&&q!==Symbol.prototype?"symbol":typeof q};function gt(q){if(Array.isArray(q)){for(var e=0,L=Array(q.length);e0&&arguments[0]!==void 0?arguments[0]:vn(),e=function(Ee){return bi(Ee)};if(e.version="2.3.1",e.removed=[],!q||!q.document||q.document.nodeType!==9)return e.isSupported=!1,e;var L=q.document,m=q.document,k=q.DocumentFragment,I=q.HTMLTemplateElement,w=q.Node,b=q.Element,C=q.NodeFilter,u=q.NamedNodeMap,g=u===void 0?q.NamedNodeMap||q.MozNamedAttrMap:u,t=q.Text,n=q.Comment,i=q.DOMParser,o=q.trustedTypes,c=b.prototype,d=Rt(c,"cloneNode"),r=Rt(c,"nextSibling"),s=Rt(c,"childNodes"),a=Rt(c,"parentNode");if(typeof I=="function"){var l=m.createElement("template");l.content&&l.content.ownerDocument&&(m=l.content.ownerDocument)}var 
h=bn(o,L),f=h&&ue?h.createHTML(""):"",S=m,v=S.implementation,p=S.createNodeIterator,_=S.createDocumentFragment,y=S.getElementsByTagName,E=L.importNode,N={};try{N=St(m).documentMode?m.documentMode:{}}catch{}var D={};e.isSupported=typeof a=="function"&&v&&typeof v.createHTMLDocument!="undefined"&&N!==9;var M=un,B=hn,O=gn,T=fn,A=pn,P=Cn,F=mn,W=null,R=Qe({},[].concat(gt(mi),gt(jt),gt(qt),gt($t),gt(pi))),z=null,j=Qe({},[].concat(gt(Ci),gt(Gt),gt(vi),gt(Pt))),$=null,G=null,J=!0,re=!0,se=!1,Z=!1,V=!1,K=!1,X=!1,oe=!1,ae=!1,ee=!0,ue=!1,le=!0,x=!0,H=!1,U={},Q=null,Y=Qe({},["annotation-xml","audio","colgroup","desc","foreignobject","head","iframe","math","mi","mn","mo","ms","mtext","noembed","noframes","noscript","plaintext","script","style","svg","template","thead","title","video","xmp"]),ne=null,ge=Qe({},["audio","video","img","source","image","track"]),fe=null,ce=Qe({},["alt","class","for","id","label","name","pattern","placeholder","role","summary","title","value","style","xmlns"]),he="http://www.w3.org/1998/Math/MathML",ve="http://www.w3.org/2000/svg",Ce="http://www.w3.org/1999/xhtml",be=Ce,de=!1,me=null,_e=m.createElement("form"),ye=function(Ee){me&&me===Ee||((!Ee||(typeof Ee=="undefined"?"undefined":Dt(Ee))!=="object")&&(Ee={}),Ee=St(Ee),W="ALLOWED_TAGS"in Ee?Qe({},Ee.ALLOWED_TAGS):R,z="ALLOWED_ATTR"in Ee?Qe({},Ee.ALLOWED_ATTR):j,fe="ADD_URI_SAFE_ATTR"in Ee?Qe(St(ce),Ee.ADD_URI_SAFE_ATTR):ce,ne="ADD_DATA_URI_TAGS"in Ee?Qe(St(ge),Ee.ADD_DATA_URI_TAGS):ge,Q="FORBID_CONTENTS"in Ee?Qe({},Ee.FORBID_CONTENTS):Y,$="FORBID_TAGS"in Ee?Qe({},Ee.FORBID_TAGS):{},G="FORBID_ATTR"in Ee?Qe({},Ee.FORBID_ATTR):{},U="USE_PROFILES"in Ee?Ee.USE_PROFILES:!1,J=Ee.ALLOW_ARIA_ATTR!==!1,re=Ee.ALLOW_DATA_ATTR!==!1,se=Ee.ALLOW_UNKNOWN_PROTOCOLS||!1,Z=Ee.SAFE_FOR_TEMPLATES||!1,V=Ee.WHOLE_DOCUMENT||!1,oe=Ee.RETURN_DOM||!1,ae=Ee.RETURN_DOM_FRAGMENT||!1,ee=Ee.RETURN_DOM_IMPORT!==!1,ue=Ee.RETURN_TRUSTED_TYPE||!1,X=Ee.FORCE_BODY||!1,le=Ee.SANITIZE_DOM!==!1,x=Ee.KEEP_CONTENT!==!1,H=Ee.IN_PLACE||!1,F=Ee.ALLOWED_URI_REGEXP||F,be=Ee.NAMESPACE||Ce,Z&&(re=!1),ae&&(oe=!0),U&&(W=Qe({},[].concat(gt(pi))),z=[],U.html===!0&&(Qe(W,mi),Qe(z,Ci)),U.svg===!0&&(Qe(W,jt),Qe(z,Gt),Qe(z,Pt)),U.svgFilters===!0&&(Qe(W,qt),Qe(z,Gt),Qe(z,Pt)),U.mathMl===!0&&(Qe(W,$t),Qe(z,vi),Qe(z,Pt))),Ee.ADD_TAGS&&(W===R&&(W=St(W)),Qe(W,Ee.ADD_TAGS)),Ee.ADD_ATTR&&(z===j&&(z=St(z)),Qe(z,Ee.ADD_ATTR)),Ee.ADD_URI_SAFE_ATTR&&Qe(fe,Ee.ADD_URI_SAFE_ATTR),Ee.FORBID_CONTENTS&&(Q===Y&&(Q=St(Q)),Qe(Q,Ee.FORBID_CONTENTS)),x&&(W["#text"]=!0),V&&Qe(W,["html","head","body"]),W.table&&(Qe(W,["tbody"]),delete $.tbody),rt&&rt(Ee),me=Ee)},ke=Qe({},["mi","mo","mn","ms","mtext"]),Te=Qe({},["foreignobject","desc","title","annotation-xml"]),Me=Qe({},jt);Qe(Me,qt),Qe(Me,dn);var We=Qe({},$t);Qe(We,cn);var xe=function(Ee){var Re=a(Ee);(!Re||!Re.tagName)&&(Re={namespaceURI:Ce,tagName:"template"});var Ae=_t(Ee.tagName),je=_t(Re.tagName);if(Ee.namespaceURI===ve)return Re.namespaceURI===Ce?Ae==="svg":Re.namespaceURI===he?Ae==="svg"&&(je==="annotation-xml"||ke[je]):Boolean(Me[Ae]);if(Ee.namespaceURI===he)return Re.namespaceURI===Ce?Ae==="math":Re.namespaceURI===ve?Ae==="math"&&Te[je]:Boolean(We[Ae]);if(Ee.namespaceURI===Ce){if(Re.namespaceURI===ve&&!Te[je]||Re.namespaceURI===he&&!ke[je])return!1;var 
et=Qe({},["title","style","font","a","script"]);return!We[Ae]&&(et[Ae]||!Me[Ae])}return!1},He=function(Ee){It(e.removed,{element:Ee});try{Ee.parentNode.removeChild(Ee)}catch{try{Ee.outerHTML=f}catch{Ee.remove()}}},Le=function(Ee,Re){try{It(e.removed,{attribute:Re.getAttributeNode(Ee),from:Re})}catch{It(e.removed,{attribute:null,from:Re})}if(Re.removeAttribute(Ee),Ee==="is"&&!z[Ee])if(oe||ae)try{He(Re)}catch{}else try{Re.setAttribute(Ee,"")}catch{}},Se=function(Ee){var Re=void 0,Ae=void 0;if(X)Ee=""+Ee;else{var je=gi(Ee,/^[\r\n\t ]+/);Ae=je&&je[0]}var et=h?h.createHTML(Ee):Ee;if(be===Ce)try{Re=new i().parseFromString(et,"text/html")}catch{}if(!Re||!Re.documentElement){Re=v.createDocument(be,"template",null);try{Re.documentElement.innerHTML=de?"":et}catch{}}var Ze=Re.body||Re.documentElement;return Ee&&Ae&&Ze.insertBefore(m.createTextNode(Ae),Ze.childNodes[0]||null),be===Ce?y.call(Re,V?"html":"body")[0]:V?Re.documentElement:Ze},De=function(Ee){return p.call(Ee.ownerDocument||Ee,Ee,C.SHOW_ELEMENT|C.SHOW_COMMENT|C.SHOW_TEXT,null,!1)},Pe=function(Ee){return Ee instanceof t||Ee instanceof n?!1:typeof Ee.nodeName!="string"||typeof Ee.textContent!="string"||typeof Ee.removeChild!="function"||!(Ee.attributes instanceof g)||typeof Ee.removeAttribute!="function"||typeof Ee.setAttribute!="function"||typeof Ee.namespaceURI!="string"||typeof Ee.insertBefore!="function"},Fe=function(Ee){return(typeof w=="undefined"?"undefined":Dt(w))==="object"?Ee instanceof w:Ee&&(typeof Ee=="undefined"?"undefined":Dt(Ee))==="object"&&typeof Ee.nodeType=="number"&&typeof Ee.nodeName=="string"},ze=function(Ee,Re,Ae){!D[Ee]||on(D[Ee],function(je){je.call(e,Re,Ae,me)})},Ke=function(Ee){var Re=void 0;if(ze("beforeSanitizeElements",Ee,null),Pe(Ee)||gi(Ee.nodeName,/[\u0080-\uFFFF]/))return He(Ee),!0;var Ae=_t(Ee.nodeName);if(ze("uponSanitizeElement",Ee,{tagName:Ae,allowedTags:W}),!Fe(Ee.firstElementChild)&&(!Fe(Ee.content)||!Fe(Ee.content.firstElementChild))&&mt(/<[/\w]/g,Ee.innerHTML)&&mt(/<[/\w]/g,Ee.textContent)||Ae==="select"&&mt(/