-
-## Contribute
-
-To add another provider, it's very simple:
-1. Create a new file in [g4f/Provider/Providers](./g4f/Provider/Providers) with the name of the provider.
-2. In the file, paste the *Boilerplate* you can find in [g4f/Provider/Provider.py](./g4f/Provider/Provider.py):
-
-```py
-import os
-from ..typing import sha256, Dict, get_type_hints
-
-url = None
-model = None
-supports_stream = False
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- return
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
-
-```
-
-3. Here you can adjust the settings; for example, if the website supports streaming, set `supports_stream` to `True`.
-4. Write code to request the provider in `_create_completion` and `yield` the response, *even if* it's a one-time response. Don't hesitate to look at other providers for inspiration; a minimal sketch follows step 6 below.
-5. Add the provider name in [g4f/Provider/__init__.py](./g4f/Provider/__init__.py):
-
-```py
-from . import Provider
-from .Providers import (
- ...,
- ProviderNameHere
-)
-```
-
-6. You are done! Test the provider by calling it:
-```py
-import g4f
-
-response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Provider.PROVIDERNAME,
- messages=[{"role": "user", "content": "test"}], stream=g4f.Provider.PROVIDERNAME.supports_stream)
-
-for message in response:
- print(message, flush=True, end='')
-```
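-
-For reference, here is a minimal provider sketch. It is only an illustration: the URL `https://example-provider.com/api/chat` and the `text` response field are made up, so replace them with whatever the real provider actually expects.
-
-```py
-import os
-import requests
-
-from ..typing import sha256, Dict, get_type_hints
-
-url = 'https://example-provider.com'
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- # send the whole conversation to the (hypothetical) endpoint and yield the reply once
- response = requests.post(url + '/api/chat', json={'model': model, 'messages': messages})
- response.raise_for_status()
- yield response.json()['text'] # 'text' is an assumed field name
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
-```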
-
-## ChatGPT clone
-
-> Currently implementing new features and trying to scale it; please be patient, it may be unstable.
-> https://chat.g4f.ai/chat
-> This site was developed by me and includes **gpt-4/3.5**, **internet access**, and **gpt-jailbreaks** like DAN
-> Run locally here: https://github.com/xtekky/chatgpt-clone
-
-## Copyright:
-
-This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt)
-
-## Copyright Notice:
-
-```
-xtekky/gpt4free: Copyright (C) 2023 xtekky
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see <https://www.gnu.org/licenses/>.
-```
-
-
-## Star History
-
-
-
-
diff --git a/spaces/101-5/gpt4free/g4f/.v1/testing/aiassistest.py b/spaces/101-5/gpt4free/g4f/.v1/testing/aiassistest.py
deleted file mode 100644
index 57a34f1580ac3dc135ac025dd74236cbedbeb3c7..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/testing/aiassistest.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import aiassist
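-# ask a first question, then reuse its parentMessageId so the follow-up question continues the same conversation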
-
-question1 = "Who won the world series in 2020?"
-req = aiassist.Completion.create(prompt=question1)
-answer = req["text"]
-message_id = req["parentMessageId"]
-
-question2 = "Where was it played?"
-req2 = aiassist.Completion.create(prompt=question2, parentMessageId=message_id)
-answer2 = req2["text"]
-
-print(answer)
-print(answer2)
diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/models.py b/spaces/123Kumar/vits-uma-genshin-honkai123/models.py
deleted file mode 100644
index 52e15d1b9775038fd6e82b2efe6f95f51c66802d..0000000000000000000000000000000000000000
--- a/spaces/123Kumar/vits-uma-genshin-honkai123/models.py
+++ /dev/null
@@ -1,534 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
- super().__init__()
- filter_channels = in_channels # it needs to be removed from future version.
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.log_flow = modules.Log()
- self.flows = nn.ModuleList()
- self.flows.append(modules.ElementwiseAffine(2))
- for i in range(n_flows):
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.flows.append(modules.Flip())
-
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- self.post_flows = nn.ModuleList()
- self.post_flows.append(modules.ElementwiseAffine(2))
- for i in range(4):
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.post_flows.append(modules.Flip())
-
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
- x = torch.detach(x)
- x = self.pre(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.convs(x, x_mask)
- x = self.proj(x) * x_mask
-
- if not reverse:
- flows = self.flows
- assert w is not None
-
- logdet_tot_q = 0
- h_w = self.post_pre(w)
- h_w = self.post_convs(h_w, x_mask)
- h_w = self.post_proj(h_w) * x_mask
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
- z_q = e_q
- for flow in self.post_flows:
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
- logdet_tot_q += logdet_q
- z_u, z1 = torch.split(z_q, [1, 1], 1)
- u = torch.sigmoid(z_u) * x_mask
- z0 = (w - u) * x_mask
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
- logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
-
- logdet_tot = 0
- z0, logdet = self.log_flow(z0, x_mask)
- logdet_tot += logdet
- z = torch.cat([z0, z1], 1)
- for flow in flows:
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
- logdet_tot = logdet_tot + logdet
- nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
- return nll + logq # [b]
- else:
- flows = list(reversed(self.flows))
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
- for flow in flows:
- z = flow(z, x_mask, g=x, reverse=reverse)
- z0, z1 = torch.split(z, [1, 1], 1)
- logw = z0
- return logw
-
-
-class DurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.proj = nn.Conv1d(filter_channels, 1, 1)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- def forward(self, x, x_mask, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
- x = self.proj(x * x_mask)
- return x * x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
-
- self.emb = nn.Embedding(n_vocab, hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths):
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(weight_norm(
- ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
- k, u, padding=(k-u)//2)))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel//(2**(i+1))
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i*self.num_kernels+j](x)
- else:
- xs += self.resblocks[i*self.num_kernels+j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2,3,5,7,11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=0,
- gin_channels=0,
- use_sdp=True,
- **kwargs):
-
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
-
- self.use_sdp = use_sdp
-
- self.enc_p = TextEncoder(n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
- if use_sdp:
- self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
- else:
- self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
- if n_speakers > 1:
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- def forward(self, x, x_lengths, y, y_lengths, sid=None):
-
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
-
- with torch.no_grad():
- # negative cross-entropy
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
- neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
- neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
-
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
- w = attn.sum(2)
- if self.use_sdp:
- l_length = self.dp(x, x_mask, w, g=g)
- l_length = l_length / torch.sum(x_mask)
- else:
- logw_ = torch.log(w + 1e-6) * x_mask
- logw = self.dp(x, x_mask, g=g)
- l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
-
- # expand prior
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
- z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
- o = self.dec(z_slice, g=g)
- return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
- device = next(self.parameters()).device # get the device the model is on
- x, m_p, logs_p, x_mask = self.enc_p(x.to(device), x_lengths.to(device))
- if self.n_speakers > 0:
- g = self.emb_g(sid.to(device)).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- if self.use_sdp:
- logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
- else:
- logw = self.dp(x, x_mask, g=g)
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
- z = self.flow(z_p, y_mask, g=g, reverse=True)
- o = self.dec((z * y_mask)[:,:,:max_len], g=g)
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
- def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
- assert self.n_speakers > 0, "n_speakers have to be larger than 0."
- g_src = self.emb_g(sid_src).unsqueeze(-1)
- g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
- z_p = self.flow(z, y_mask, g=g_src)
- z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
- o_hat = self.dec(z_hat * y_mask, g=g_tgt)
- return o_hat, y_mask, (z, z_p, z_hat)
-
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Full Version Bandicam Free Download The Ultimate Guide to Screen Recording.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Full Version Bandicam Free Download The Ultimate Guide to Screen Recording.md
deleted file mode 100644
index 3b4a38fa1982335d9179f16b2d7a1efb9e84a4cc..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Full Version Bandicam Free Download The Ultimate Guide to Screen Recording.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-
How to Get Full Version Bandicam Free Download
-
If you are looking for a screen recorder that can capture high-quality videos without lagging or watermarking, you might be interested in Bandicam. Bandicam is a popular screen recording software that can record anything on your PC screen, such as games, online videos, webinars, lectures, and more. It can also record audio from your microphone or speakers, and save the recorded files in various formats, such as MP4, AVI, MOV, etc.
However, Bandicam is not free software. You need to pay $39.95 for a single license, or $59.95 for a two-PC license. If you want to use Bandicam without any limitations or watermarks, you need to purchase the full version. But what if you don't have the budget or don't want to spend money on a screen recorder? Is there a way to get the full version of Bandicam as a free download?
-
The answer is yes, but you need to be careful. There are many websites that claim to offer full version Bandicam free download, but most of them are scams or malware. They may infect your computer with viruses, spyware, ransomware, or other malicious programs that can harm your system or steal your personal information. Some of them may also ask you to complete surveys, download additional software, or enter your credit card details before giving you the download link. These are all red flags that you should avoid.
-
The only safe and legal way to get full version Bandicam free download is to use the official trial version from the Bandicam website. The trial version allows you to use all the features of Bandicam for 10 minutes per recording session, and it adds a watermark to the recorded videos. However, you can remove the watermark and extend the recording time by using a video editing software, such as Windows Movie Maker, VLC Media Player, or VideoProc. Here are the steps to do so:
Select the recording mode and adjust the settings according to your needs.
-
Click the "REC" button to start recording your screen.
-
When you are done, click the "Stop" button to save the recorded file.
-
Open your video editing software and import the recorded file.
-
Crop or trim the video to remove the watermark and any unwanted parts.
-
Save or export the edited video in your preferred format and quality.
-
-
Congratulations! You have successfully got full version Bandicam free download without paying anything or risking your computer security. You can now enjoy recording your screen with Bandicam without any limitations or watermarks.
-
-
Why Choose Bandicam as Your Screen Recorder?
-
-
Bandicam is one of the best screen recording software in the market. It has many advantages over other screen recorders, such as:
-
-
It can record high-quality videos up to 4K resolution and 480 FPS.
-
It can compress the recorded files to a smaller size without losing quality.
-
It can record multiple audio sources simultaneously, such as your voice and the system sound.
-
It can record external devices, such as webcam, smartphone, Xbox, PlayStation, etc.
-
It can add various effects and annotations to your videos, such as mouse cursor, logo, webcam overlay, drawing tools, etc.
-
It can capture screenshots and save them in various formats, such as PNG, JPG, BMP, etc.
-
It has a simple and intuitive interface that is easy to use for beginners and professionals alike.
-
-
With Bandicam, you can record anything on your screen with ease and efficiency. Whether you want to record your gameplay, online streaming, tutorial, presentation, or anything else, Bandicam can handle it all.
-
-
How to Use Bandicam Effectively?
-
To get the most out of Bandicam, you need to know how to use it effectively. Here are some tips and tricks that can help you improve your screen recording experience with Bandicam:
-
-
Before recording, make sure you have enough disk space and CPU power to avoid lagging or crashing.
-
Select the appropriate recording mode for your purpose. Bandicam has four recording modes: Screen Recording Mode, Game Recording Mode, Device Recording Mode, and Webcam Recording Mode.
-
Adjust the settings according to your needs. You can change the video format, quality, codec, frame rate, audio source, hotkeys, and more in the settings menu.
-
Use the preview window to check the recording area and adjust the size and position of the window.
-
Use the pause/resume function to control the recording process. You can also use the auto-complete recording function to set a time limit or file size limit for your recording.
-
Use the edit function to trim or split your recorded files. You can also use the extract function to extract audio or images from your recorded files.
-
Use the upload function to share your recorded files online. You can upload your videos directly to YouTube or Vimeo from Bandicam.
-
-
By following these tips and tricks, you can use Bandicam more effectively and create amazing screen recordings with ease.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Deadliest Catch Alaskan Storm English PCDVD 2Lions-Team Game Download VERIFIED.md b/spaces/1gistliPinn/ChatGPT4/Examples/Deadliest Catch Alaskan Storm English PCDVD 2Lions-Team Game Download VERIFIED.md
deleted file mode 100644
index b8c89e82239aa0f336f62258b5796711252213c5..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Deadliest Catch Alaskan Storm English PCDVD 2Lions-Team Game Download VERIFIED.md
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
Deadliest Catch Alaskan Storm: A Realistic and Challenging Simulation Game
-
If you are a fan of the Discovery Channel's hit show Deadliest Catch, you might want to try out the video game based on it: Deadliest Catch Alaskan Storm. This game lets you experience the thrill and danger of crab fishing in the Bering Sea, as you control one of the real-life boats featured on the show, or create your own custom vessel. You will have to manage your crew, equipment, and finances, as well as deal with unpredictable weather, ice, and wildlife. The game features realistic graphics, physics, and sounds, as well as a dynamic campaign mode that changes according to your actions. You can also play in free mode, where you can explore the vast ocean at your own pace.
-
Deadliest Catch Alaskan Storm was released in 2008 for Windows PC and Xbox 360. It was developed by Liquid Dragon Studios LLC and published by Greenwave Games. The game received positive reviews from critics and fans alike, who praised its authenticity and difficulty. However, some also criticized its repetitiveness and lack of online multiplayer mode. If you are looking for a challenging and immersive simulation game that will test your skills and nerves, you might want to download Deadliest Catch Alaskan Storm today.
-
Deadliest Catch Alaskan Storm English PCDVD 2Lions-Team game download
You can download Deadliest Catch Alaskan Storm from various sources on the internet, such as KcTorrent[^1^], Retrolorian[^2^], or npm[^3^]. However, be careful of potential viruses or malware that might harm your computer. Always scan your files before opening them, and use a reliable antivirus software. Alternatively, you can buy the game from online retailers such as Amazon or eBay.
How to Play Deadliest Catch Alaskan Storm
-
Deadliest Catch Alaskan Storm is not a simple arcade game. It is a complex and realistic simulation that requires strategy, patience, and skill. If you want to succeed as a crab fisherman, you will need to learn how to play the game properly. Here are some tips and tricks to help you get started.
-
-
The game has two main modes: Career and Missions. In Career mode, you can create your own captain and boat, and embark on a full season of crab fishing. You will have to manage your crew, finances, equipment, and strategy, as well as deal with random events and challenges. In Missions mode, you can play various scenarios that test your skills and knowledge of the game. You can also unlock new boats and crew members by completing missions.
-
The game has a tutorial that teaches you the basics of the game, such as how to navigate, set pots, haul pots, sort crabs, and dock at harbors. You can access the tutorial from the main menu or from the career mode. The tutorial consists of five days of fishing, each with a different objective and difficulty level. You can replay the tutorial at any time if you need a refresher.
-
The game has a dynamic weather system that affects the gameplay. You will encounter various weather conditions, such as fog, rain, snow, wind, storms, and ice. The weather can change rapidly and unpredictably, so you need to be prepared for anything. The weather can also affect your boat's performance, visibility, stability, and safety. You can check the weather forecast from your map screen or from the radio.
-
The game has a realistic physics engine that simulates the movement of your boat and the ocean waves. You will have to steer your boat carefully and adjust your speed according to the sea conditions. You will also have to balance your boat's weight by distributing your pots and crabs evenly. If your boat is too heavy or unbalanced, it can sink or capsize.
-
The game has a detailed crab fishing system that requires strategy and skill. You will have to choose your fishing grounds wisely by checking the map for hot spots and survey data. You will also have to set your pots in optimal locations and depths by using your sonar and GPS. You will have to monitor your pots' soak time and bait level by using your pot tracker. You will have to haul your pots efficiently and safely by using your crane and hook. You will have to sort your crabs quickly and accurately by using your sorting table and bin.
-
The game has a realistic crew management system that affects the gameplay. You will have to hire, fire, train, motivate, and pay your crew members. Each crew member has different skills, personalities, traits, and salaries. You will have to assign them tasks according to their abilities and preferences. You will also have to keep them happy and healthy by providing them food, rest, medical attention, and entertainment. If your crew is unhappy or injured, they can quit or perform poorly.
-
The game has a realistic economy system that affects the gameplay. You will have to buy, sell, upgrade, repair, and maintain your boat and equipment. Each item has different costs, benefits, and drawbacks. You will also have to deal with various expenses, such as fuel, bait, harbor fees, taxes, insurance, fines, etc. You will also have to deal with fluctuating crab prices that depend on supply and demand. Your goal is to make as much profit as possible by catching more crabs at lower costs.
-
-
Deadliest Catch Alaskan Storm is a challenging and rewarding game that will test your skills as a crab fisherman. If you follow these tips and tricks, you will be able to enjoy the game more and achieve better results.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Elements Of Astro Mechanics Van De Kamp Pdf 19 PORTABLE.md b/spaces/1gistliPinn/ChatGPT4/Examples/Elements Of Astro Mechanics Van De Kamp Pdf 19 PORTABLE.md
deleted file mode 100644
index 83703139fd7519b313e021375f3289d1bfd1d1ae..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Elements Of Astro Mechanics Van De Kamp Pdf 19 PORTABLE.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
this hands-on stem class focuses on space exploration. students will learn about outer space, space shuttles, and how they work. students will learn about how astronauts live and work in space, and what happens when they take off and return to earth. students will explore different parts of the solar system and how they work. at the end of the class, students will have a chance to explore a planetarium and learn about outer space. min 8/max 16. registration deadline: march 2
-
this hands-on stem class focuses on outer space. students will learn about outer space, space shuttles, and how they work. students will learn about how astronauts live and work in space, and what happens when they take off and return to earth. students will explore different parts of the solar system and how they work. at the end of the class, students will have a chance to explore a planetarium and learn about outer space. min 8/max 16. registration deadline: november 9
this hands-on stem class focuses on outer space. students will learn about outer space, space shuttles, and how they work. students will learn about how astronauts live and work in space, and what happens when they take off and return to earth. students will explore different parts of the solar system and how they work. at the end of the class, students will have a chance to explore a planetarium and learn about outer space. min 8/max 16. registration deadline: march 2
-
this hands-on stem class focuses on space exploration. students will learn about outer space, space shuttles, and how they work. students will learn about how astronauts live and work in space, and what happens when they take off and return to earth. students will explore different parts of the solar system and how they work. at the end of the class, students will have a chance to explore a planetarium and learn about outer space. min 8/max 16. registration deadline: november 9
Descargar Bingo Player APK: How to Play Bingo Online for Free and Win Real Money
-
If you love playing bingo games, you might be interested in descargar bingo player apk, a free app that lets you play bingo online for free and win real money on your Android device. In this article, we will tell you everything you need to know about this app, including its features, benefits, how to download and install it, how to play bingo online for free and win real money with it, and some alternatives to it. By the end of this article, you will be ready to join millions of bingo players from around the world and have fun while earning cash prizes.
Bingo Player APK is a free app that lets you play bingo games on your Android device. You can download it from a trusted source and install it on your phone or tablet. With this app, you can enjoy all your favorite bingo games anytime and anywhere, without needing an internet connection or spending any money.
-
Features and benefits of Bingo Player APK
-
Bingo Player APK has many features and benefits that make it one of the best bingo apps for Android devices. Here are some of them:
-
Multiple game modes and themes
-
You can choose from different game modes, such as classic, speed, or blackout bingo, and play with one, two, three, or four cards at a time. You can also customize your bingo cards and daubers with different colors and patterns. Plus, you can explore various themes, such as adventure, fantasy, or casino, and enjoy stunning graphics and sound effects.
-
Daily bonuses and rewards
-
You can earn coins and power-ups by playing bingo games, completing daily challenges, participating in events and tournaments, or spinning the lucky wheel. You can use these coins and power-ups to buy more bingo cards, boost your chances of winning, or unlock new levels and features. You can also win real cash prizes by playing in cash games or redeeming your coins for gift cards or PayPal cash.
-
Chat with other players and make friends
-
You can chat with other bingo players from around the world in real-time while you play. You can send messages, emojis, gifts, or friend requests to other players. You can also join or create a bingo club and play with your friends or other club members. Playing bingo online with other people is more fun and social than playing alone.
-
descargar pingo player apk gratis
-bingo player apk android download
-cómo instalar bingo player apk en mi celular
-bingo player apk xapk última versión
-descargar bingo player apk para pc
-bingo player apk compatible con protocolos de vídeo streaming
-bingo player apk con temporizador de juego
-descargar bingo player apk sin anuncios
-bingo player apk modificado con funciones premium
-bingo player apk para jugar bingo con amigos
-descargar bingo player apk desde apkcombo
-bingo player apk con tableros de bingo aleatorios
-bingo player apk con opción de guardar tu tablero favorito
-descargar bingo player apk seguro y confiable
-bingo player apk con soporte para android 5.0 o superior
-
Play offline or online
-
You can play bingo offline or online with Bingo Player APK. If you have an internet connection, you can play bingo online with other players, join cash games or tournaments, chat with other players, or access new features and updates. If you don't have an internet connection, you can still play bingo offline with the app. You can play with the same cards and power-ups that you have online, and you can still win coins and rewards. You can also sync your progress and winnings when you go online again.
-
How to download and install Bingo Player APK?
-
Downloading and installing Bingo Player APK is easy and fast. You just need to follow these steps:
-
Steps to download and install Bingo Player APK from a trusted source
-
-
Go to a trusted website that offers Bingo Player APK, such as [APKPure] or [Uptodown].
-
Find the Bingo Player APK file and tap on the download button.
-
Wait for the download to finish and then open the file.
-
Allow the installation of unknown sources if prompted by your device.
-
Follow the instructions on the screen to install the app.
-
Launch the app and enjoy playing bingo online for free and win real money.
-
-
Tips to avoid malware and viruses when downloading APK files
-
APK files are Android application packages that can be downloaded and installed on Android devices. However, not all APK files are safe and reliable. Some APK files may contain malware or viruses that can harm your device or steal your personal information. To avoid this, you should follow these tips:
-
-
Only download APK files from trusted and reputable sources, such as the ones we mentioned above.
-
Check the reviews and ratings of the APK files before downloading them.
-
Scan the APK files with an antivirus or malware scanner before installing them.
-
Do not grant unnecessary permissions or access to the APK files when installing them.
-
Delete the APK files after installing them to save space and avoid clutter.
-
-
How to play bingo online for free and win real money with Bingo Player APK?
-
Playing bingo online for free and win real money with Bingo Player APK is simple and fun. You just need to follow these steps:
-
Choose a game mode and a bingo room
-
You can choose from different game modes, such as classic, speed, or blackout bingo, depending on your preference and skill level. You can also choose from different bingo rooms, such as adventure, fantasy, or casino, depending on your mood and taste. Each bingo room has its own theme, graphics, sound effects, and prizes.
-
Buy bingo cards and daub the numbers
-
You can buy bingo cards with coins or cash, depending on the game mode and the bingo room. You can play with one, two, three, or four cards at a time. The more cards you play with, the higher your chances of winning. Once you have your cards, you can start daubing the numbers that are called out by the app. You can daub manually or automatically, depending on your preference.
-
Use power-ups and boosters to increase your chances of winning
-
You can use power-ups and boosters to enhance your bingo experience and increase your chances of winning. Power-ups are special items that you can use during the game, such as extra balls, double daubs, free spaces, or instant bingos. Boosters are special items that you can use before the game starts, such as extra cards, lucky daubers, or VIP passes. You can earn power-ups and boosters by playing games, completing challenges, spinning the wheel, or buying them with coins or cash.
-
Claim your bingo prizes and cash out your winnings
-
You can claim your bingo prizes by completing a pattern on your card, such as a line, a column, a diagonal, a four corners, or a full card. You can also claim multiple bingos on one card if possible. The more bingos you claim, the bigger your prizes. You can win coins, power-ups, boosters, or real cash prizes by playing bingo online with Bingo Player APK. You can also win jackpots or special prizes by playing in special games or events. You can cash out your winnings by redeeming your coins for gift cards or PayPal cash. You can also withdraw your cash prizes directly to your bank account or e-wallet.
-
Alternatives to Bingo Player APK
-
If you are looking for other options to play bingo online for free and win real money, you can try these alternatives to Bingo Player APK:
-
Other bingo apps for Android devices
-
There are many other bingo apps for Android devices that you can download and install on your phone or tablet. Some of the most popular ones are:
-
-
[Bingo Blitz] - A bingo app that lets you travel around the world and play bingo games in different cities and landmarks. You can also collect souvenirs, cook dishes, and meet new friends along the way.
-
[Bingo Bash] - A bingo app that lets you play bingo games with different twists and features. You can also play mini-games, collect items, and join events and tournaments.
-
[Bingo Pop] - A bingo app that lets you play bingo games with beautiful graphics and animations. You can also explore different scenes, unlock new levels, and win huge jackpots.
-
-
Websites and desktop apps for playing bingo online
-
If you prefer playing bingo online on your computer, you can visit these websites or download these desktop apps that offer bingo games:
-
-
[Paddy Power Bingo] - A website that offers a variety of bingo games, such as 75-ball, 80-ball, or 90-ball bingo. You can also play slots, casino games, or sports betting on the same site.
-
[Mecca Bingo] - A website that offers a range of bingo games, such as Deal or No Deal Bingo, Rainbow Riches Bingo, or Emoji Bingo. You can also play slots, scratch cards, or live casino games on the same site.
-
[Bingo Caller] - A desktop app that lets you host your own bingo games on your computer. You can customize the game settings, such as the number of balls, the speed of calling, or the voice of the caller. You can also print out your own bingo cards or use the app to generate them.
-
-
Conclusion
-
Bingo Player APK is a free app that lets you play bingo online for free and win real money on your Android device. It has many features and benefits that make it one of the best bingo apps for Android devices. You can download it from a trusted source and install it on your phone or tablet. You can then choose a game mode and a bingo room, buy bingo cards and daub the numbers, use power-ups and boosters to increase your chances of winning, claim your bingo prizes and cash out your winnings. You can also chat with other players and make friends, play offline or online, and join events and tournaments. If you are looking for other options to play bingo online for free and win real money, you can try other bingo apps for Android devices or websites and desktop apps for playing bingo online.
-
If you love playing bingo games, you should definitely try out Bingo Player APK. It is a fun and easy way to enjoy bingo online for free and win real money on your Android device. Download it now and start playing!
-
FAQs
-
Is Bingo Player APK safe and legal?
-
Yes, Bingo Player APK is safe and legal to use. It is a legitimate app that does not contain any malware or viruses. It is also licensed and regulated by the relevant authorities in the countries where it operates. However, you should always download it from a trusted source and scan it with an antivirus or malware scanner before installing it.
-
How much money can I win with Bingo Player APK?
-
The amount of money you can win with Bingo Player APK depends on several factors, such as the game mode, the bingo room, the number of cards you play with, the number of bingos you claim, the power-ups and boosters you use, and the jackpots or special prizes you win. You can win coins, power-ups, boosters, or real cash prizes by playing bingo online with Bingo Player APK. You can also win jackpots or special prizes by playing in special games or events. You can cash out your winnings by redeeming your coins for gift cards or PayPal cash. You can also withdraw your cash prizes directly to your bank account or e-wallet. The minimum amount you can withdraw is $10, and the maximum amount is $1000 per day.
-
What are the best tips and strategies for playing bingo online?
-
Some of the best tips and strategies for playing bingo online are:
-
-
Play with more cards to increase your chances of winning, but don't play with more cards than you can handle.
-
Choose a game mode and a bingo room that suit your preference and skill level.
-
Use power-ups and boosters wisely to enhance your bingo experience and increase your chances of winning.
-
Chat with other players and make friends, but don't get distracted by the chat while you play.
-
Play in cash games or tournaments to win real money, but don't bet more than you can afford to lose.
-
Have fun and enjoy the game, but don't get addicted or obsessed with it.
-
-
How can I contact the support team of Bingo Player APK?
-
If you have any questions, issues, or feedback about Bingo Player APK, you can contact the support team of the app by:
-
-
Sending an email to [support@bingoplayer.com].
-
Filling out the contact form on the app's website [www.bingoplayer.com].
-
Leaving a comment or a review on the app's page on the website where you downloaded it from.
-
-
The support team of Bingo Player APK is available 24/7 and will respond to your queries as soon as possible.
-
What are the system requirements for Bingo Player APK?
-
The system requirements for Bingo Player APK are:
-
-
An Android device with version 4.4 or higher.
-
A stable internet connection (optional).
-
At least 100 MB of free storage space on your device.
-
At least 1 GB of RAM on your device.
-
-
If your device meets these requirements, you can download and install Bingo Player APK and play bingo online for free and win real money.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Dernire mise a jour APK de Clash of Clans tout ce quil faut savoir.md b/spaces/1phancelerku/anime-remove-background/Dernire mise a jour APK de Clash of Clans tout ce quil faut savoir.md
deleted file mode 100644
index c357032ebd74c9fd10f2e0d60a8b48f46062e0df..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Dernire mise a jour APK de Clash of Clans tout ce quil faut savoir.md
+++ /dev/null
@@ -1,138 +0,0 @@
-
-
Clash of Clans: everything you need to know about the latest APK update
-
Are you a fan of Clash of Clans, the famous online strategy game developed by Supercell? Do you want to enjoy the game's latest features and improvements on your Android smartphone or tablet? Then this article is for you! We will cover the game's main features, the changes brought by the latest APK update, and a few tips and tricks to help you progress in your adventure. Ready to enter the world of Clash?
Clash of Clans is an online strategy game that plunges you into a medieval-fantasy universe populated by mustachioed barbarians, fire-throwing wizards, and other unique troops. The goal of the game is to build your village, train your army, and take on millions of players from around the world in epic battles.
-
You can play solo or as a team, by joining a clan or creating your own. You can take part in clan wars, clan leagues, clan games, and special events. You can also explore a secondary base, the Builder Base, and discover new buildings and characters in a mysterious world.
-
Clash of Clans is free to download and play, but you can also buy items with real money. If you do not want to use this feature, you can disable in-app purchases in your device settings. You must also be at least 13 years old to play or download Clash of Clans, and you need an Internet connection.
-
Game features
-
Clash of Clans offers a rich and varied gaming experience, with many features to discover. Here are some of the main ones:
-
-
Build your village and turn it into an impregnable fortress. You can choose the location and design of your buildings, and upgrade them with resources you collect or loot.
-
Defend your village against enemy attacks with a multitude of towers, cannons, bombs, traps, mortars, and walls. You can also use spells and heroes to strengthen your defense.
-
Attack other players' villages and loot their resources. You can choose from a variety of troops, spells, and heroes to put together your ideal army. You can also use siege machines to destroy the toughest defenses.
-
Join a clan or create your own and invite your friends. You can exchange troops and spells with your clan members, chat with them, and take part in shared activities. You can also face other clans in clan wars, clan leagues, or clan games.
-
Unlock new levels, new troops, new spells, and new heroes as you progress through the game. You can also upgrade your troops, spells, and heroes in laboratories and workshops.
-
Discover the Builder Base, a secondary base where you can build different buildings and troops. You can also face other players in versus mode, a game mode where you attack and defend at the same time.
-
Enjoy events and challenges that offer special rewards. You can also watch replays of the game's best attacks and defenses, or follow Clash of Clans e-sports competitions.
-
-
What's new in the latest APK update?
-
Clash of Clans constantly renews itself with regular updates that bring new features and improvements. The latest APK update is dated October 18, 2021 and introduces several major changes. Here are the most important ones:
-
clash of clans apk dernière version 2023
-clash of clans mise a jour juin 2023 apk
-clash of clans télécharger apk gratuit android
-clash of clans apk mod illimité gemmes
-clash of clans nouvelle mise a jour 15.352.6 apk
-clash of clans apk hack sans verification
-clash of clans apk uptodown
-clash of clans apk pure
-clash of clans mise a jour hdv 14 apk
-clash of clans apk offline
-clash of clans apk pc
-clash of clans apk ios
-clash of clans apk mirror
-clash of clans mise a jour octobre 2023 apk
-clash of clans apk revdl
-clash of clans apk rexdl
-clash of clans mise a jour super troupes apk
-clash of clans apk android 4.4.2
-clash of clans apk android 11
-clash of clans apk android oyun club
-clash of clans mise a jour noel 2023 apk
-clash of clans apk old version
-clash of clans apk original
-clash of clans mise a jour printemps 2023 apk
-clash of clans apk private server
-clash of clans apk unlimited everything download 2023
-clash of clans mise a jour septembre 2023 apk
-clash of clans apk hack download latest version
-clash of clans apk with th14
-clash of clans mise a jour juillet 2023 apk
-clash of clans apk latest update download for android
-clash of clans mise a jour novembre 2023 apk
-clash of clans apk mod menu
-clash of clans mise a jour mars 2023 apk
-clash of clans apk hack online generator
-clash of clans mise a jour mai 2023 apk
-clash of clans apk mod unlimited troops download 2023
-clash of clans mise a jour janvier 2023 apk
-clash of clans apk hack version download for android no root
-clash of clans mise a jour avril 2023 apk
-clash of clans mise a jour février 2023 apk
-
Le nouveau district du Parc des Squelettes
-
Le Parc des Squelettes est un nouveau district qui apparaît dans votre village principal pendant la saison d'Halloween. Il s'agit d'un endroit lugubre où vous pouvez trouver des décorations effrayantes, des obstacles hantés, et surtout, le nouveau sort de Cimetière.
-
Le nouveau sort de Cimetière
-
Le sort de Cimetière est un sort sombre qui vous permet d'invoquer une horde de squelettes sur le champ de bataille. Il fonctionne comme le sort de Clonage, mais au lieu de cloner vos troupes, il crée des squelettes à partir du sol. Vous pouvez utiliser ce sort pour surprendre vos ennemis, distraire leurs défenses, ou soutenir vos troupes.
-
Les nouvelles défenses : la Ruche de Mini-Gargouilles et le Réflecteur
-
La Ruche de Mini-Gargouilles est une nouvelle défense aérienne qui lance des mini-gargouilles sur les troupes volantes ennemies. Elle peut contenir jusqu'à 12 mini-gargouilles qui attaquent en essaim et infligent des dégâts continus. Elle est disponible à partir du niveau 14 du centre du village.
-
Le Réflecteur est une nouvelle défense terrestre qui renvoie les sorts ennemis vers leur point d'origine. Il peut renvoyer jusqu'à deux sorts à la fois, ce qui peut créer des situations inattendues et drôles. Il est disponible à partir du niveau 15 du centre du village.
-
La nouvelle super troupe : le Super Mineur
-
Le Super Mineur est une nouvelle super troupe que vous pouvez débloquer en utilisant du dark elixir. Il s'agit d'une version améliorée du mineur qui creuse plus vite, inflige plus de dégâts, et a plus de points de vie. Il peut également cibler les bâtiments prioritaires comme les extracteurs, les mines, ou les réservoirs.
-
La nouvelle amélioration pour la Pelle d'Obstacles
-
La Pelle d'Obstacles est un objet magique qui vous permet de déplacer les obstacles naturels dans votre village. Elle a été améliorée dans la dernière mise à jour apk pour vous permettre de déplacer également les décorations que vous avez achetées ou g agnées. Vous pouvez ainsi personnaliser votre village à votre guise, sans être limité par l'espace disponible.
-
Comment télécharger et installer la dernière mise à jour apk ?
-
Les prérequis pour jouer à Clash of Clans sur Android
-
Pour jouer à Clash of Clans sur votre appareil Android, vous devez respecter quelques conditions. Vous devez avoir :
-
-
Un appareil Android compatible avec le jeu. Vous pouvez vérifier la compatibilité de votre appareil sur le site officiel de Supercell ou sur le Google Play Store.
-
Une version Android à jour. Vous pouvez vérifier la version de votre système d'exploitation dans les paramètres de votre appareil.
-
Un espace de stockage suffisant. Vous devez avoir au moins 200 Mo d'espace libre sur votre appareil pour télécharger et installer le jeu.
-
Une connexion Internet stable. Vous devez être connecté à un réseau Wi-Fi ou à un réseau mobile pour jouer au jeu.
-
-
Les étapes pour télécharger et installer l'apk
-
Si vous avez déjà le jeu installé sur votre appareil, vous pouvez simplement le mettre à jour via le Google Play Store. Si vous n'avez pas encore le jeu, ou si vous voulez télécharger la dernière version apk directement, vous pouvez suivre ces étapes :
-
-
Rendez-vous sur le site officiel de Supercell ou sur un site de confiance qui propose des fichiers apk.
-
Trouvez le fichier apk de la dernière mise à jour de Clash of Clans et cliquez dessus pour le télécharger.
-
Une fois le téléchargement terminé, ouvrez le fichier apk et autorisez l'installation d'applications provenant de sources inconnues si nécessaire.
-
Suivez les instructions à l'écran pour installer le jeu sur votre appareil.
-
Lancez le jeu et connectez-vous avec votre compte Google Play, Facebook, ou Supercell ID pour récupérer votre progression.
-
-
Quelques astuces et conseils pour progresser dans Clash of Clans
-
Soyez patient, économisez vos gemmes
-
Clash of Clans est un jeu qui demande du temps et de la patience. Vous ne pourrez pas construire votre village et former votre armée en un jour. Vous devrez attendre que vos bâtiments se construisent, que vos troupes se forment, que vos ressources se collectent, etc. Ne soyez pas tenté d'accélérer ces processus en dépensant vos gemmes, la monnaie premium du jeu. Les gemmes sont rares et précieuses, et vous feriez mieux de les économiser pour des achats plus utiles, comme des constructeurs supplémentaires, des objets magiques, ou des super troupes.
-
Ne précipitez pas votre passage au niveau supérieur
-
Il peut être tentant de vouloir passer au niveau supérieur de votre centre du village dès que possible, pour accéder à de nouveaux bâtiments, troupes, et fonctionnalités. Cependant, ce n'est pas une bonne idée de précipiter votre progression. Si vous passez au niveau supérieur sans avoir amélioré au maximum vos bâtiments, vos troupes, et vos défenses du niveau actuel, vous risquez de vous retrouver avec un village déséquilibré et vulnérable. De plus, vous aurez plus de difficultés à trouver des adversaires à votre portée et à gagner des ressources. Il vaut mieux prendre son temps et optimiser son village avant de passer au niveau supérieur.
-
Rejoignez un clan actif et participez aux guerres de clans
-
Clash of Clans est un jeu qui se joue mieux en équipe. Rejoindre un clan vous permet de bénéficier de nombreux avantages, comme échanger des troupes et des sorts avec les autres membres, discuter avec eux, et participer à des activités communes. L'une des activités les plus importantes est la guerre de clans, qui consiste à affronter un clan ennemi dans une série d'attaques et de défenses. La guerre de clans vous permet de gagner des étoiles, des ressources, et des médailles de guerre que vous pouvez échanger contre des objets magiques ou des super troupes. Pour participer à la guerre de clans, vous devez être membre d'un clan et avoir un bouclier de guerre activé. Vous devez également respecter les règles et la stratégie de votre clan, et faire de votre mieux pour remporter la victoire.
-
Planifiez votre stratégie d'attaque et de défense
-
Clash of Clans est un jeu de stratégie qui demande de la réflexion et de la planification. Vous ne pouvez pas attaquer ou défendre au hasard, vous devez adapter votre stratégie en fonction de votre adversaire, de vos objectifs, et de vos ressources. Voici quelques conseils pour élaborer votre stratégie :
-
-
Avant d'attaquer, observez bien le village ennemi et repérez ses points forts et ses points faibles. Identifiez les bâtiments prioritaires, comme le centre du village, les réservoirs, les extracteurs, ou les défenses aériennes. Choisissez les troupes, les sorts, et les machines de siège les plus adaptés à votre cible.
-
Pendant l'attaque, déployez vos troupes avec soin et utilisez vos sorts au bon moment. Essayez de créer un chemin pour vos troupes vers le centre du village ennemi, en éliminant les obstacles et les défenses sur le chemin. Utilisez vos héros et vos machines de siège pour percer les murs et détruire les bâtiments clés.
-
Après l'attaque, analysez le résultat et tirez-en des leçons. Regardez le replay de votre attaque et voyez ce qui a fonctionné ou pas. Notez les erreurs que vous avez commises et les améliorations que vous pouvez apporter. Essayez de varier votre stratégie en fonction des situations.
-
Pour défendre, placez vos bâtiments et vos défenses de manière stratégique dans votre village. Essayez de protéger votre centre du village, vos réservoirs, et vos extracteurs avec des murs, des tours, des pièges, et des héros. Créez des zones de défense qui peuvent ralentir ou arrêter les troupes ennemies.
-
Pendant la défense, regardez comment votre village réagit aux attaques ennemies. Voyez quels sont les points faibles ou les failles de votre défense. Notez les troupes, les sorts, et les machines de siège que vos adversaires utilisent le plus souvent.
-
Après la défense, améliorez votre village en fonction des résultats. Regardez le replay des attaques que vous avez subies et voyez comment vous pouvez renforcer votre défense. Essayez de corriger les erreurs que vous avez commises et les lacunes que vous avez constatées. Essayez de varier votre défense en fonction des situations.
-
-
Profitez des événements et des défis pour gagner des récompenses
-
Clash of Clans est un jeu qui vous propose régulièrement des événements et des défis qui vous permettent de gagner des récompenses supplémentaires. Ces récompenses peuvent être des ressources, des gemmes, des objets magiques, ou des super troupes. Voici quelques exemples d'événements et de défis que vous pouvez trouver dans le jeu :
-
-
Les événements saisonniers : ce sont des événements qui ont lieu à certaines périodes de l'année, comme Halloween, Noël, ou le Nouvel An chinois. Ils vous offrent des décorations thématiques, des obstacles spéciaux, et des missions à accomplir pour gagner des récompenses.
-
Les événements spéciaux : ce sont des événements qui ont lieu à l'occasion d'un anniversaire, d'une mise à jour, ou d'une compétition e-sport. Ils vous offrent des bonus temporaires, comme une réduction du coût ou du temps de formation des troupes ou des sorts, ou une augmentation du taux de butin.
-
Les événements quotidiens : ce sont des événements qui ont lieu tous les jours et qui vous demandent d'utiliser une certaine troupe ou un certain sort dans vos attaques. Ils vous offrent des ressources ou des gemmes si vous réussissez à remplir l'objectif.
-
Les défis saisonniers : ce sont des défis qui ont lieu pendant une saison entière et qui vous demandent de réaliser des exploits dans le jeu, comme gagner un certain nombre d'étoiles, de médailles, ou de trophées. Ils vous offrent des objets magiques ou des super troupes si vous réussissez à atteindre les paliers.
-
Les défis du pass or : ce sont des défis qui ont lieu chaque mois et qui vous demandent de remplir des tâches variées dans le jeu, comme collecter des ressources, détruire des bâtiments, ou utiliser des troupes spécifiques. Ils vous offrent des points qui vous permettent de débloquer des récompenses sur le pass or, un abonnement payant qui vous donne accès à des avantages exclusifs.
-
-
Pour profiter de ces événements et de ces défis, vous devez être attentif aux annonces et aux notifications du jeu. Vous devez également vérifier régulièrement le calendrier des événements, le menu des défis, et le pass or. Vous devez également essayer de participer à tous les événements et à tous les défis possibles, car ils vous permettent de gagner des récompenses utiles pour votre progression.
-
Conclusion
-
Clash of Clans est un jeu de stratégie en ligne passionnant et addictif qui vous offre une expérience de jeu riche et variée. Vous pouvez construire votre village, former votre armée, affronter d'autres joueurs, rejoindre un clan, découvrir un monde mystérieux, et profiter de nombreuses nouveautés et améliorations. La dernière mise à jour apk du jeu vous apporte notamment le nouveau district du Parc des Squelettes, le nouveau sort de Cimetière, les nouvelles défenses de la Ruche de Mini-Gargouilles et du Réflecteur, la nouvelle super troupe du Super Mineur, et la nouvelle amélioration pour la Pelle d'Obstacles. Pour télécharger et installer la dernière mise à jour apk, vous devez avoir un appareil Android compatible, une version Android à jour, un espace de stockage suffisant, et une connexion Internet stable. Vous pouvez ensuite suivre les étapes indiquées dans cet article pour télécharger et installer l'apk sur votre appareil. Pour progresser dans Clash of Clans, vous devez être patient, économiser vos gemmes, ne pas précipiter votre passage au niveau supérieur, rejoindre un clan actif, planifier votre stratégie d'attaque et de défense, et profiter des événements et des défis pour gagner des récompenses. Nous espérons que cet article vous a été utile et que vous allez vous amuser avec Clash of Clans !
-
FAQ
-
Voici quelques questions fréquentes sur Clash of Clans et la dernière mise à jour apk :
-
-
Quelle est la taille du fichier apk de la dernière mise à jour ?
-
Le fichier apk de la dernière mise à jour a une taille d'environ 200 Mo. Il peut varier légèrement en fonction de votre appareil.
-
Comment mettre à jour Clash of Clans sans passer par le Google Play Store ?
-
Vous pouvez mettre à jour Clash of Clans sans passer par le Google Play Store en téléchargeant et en installant le fichier apk de la dernière mise à jour sur votre appareil. Vous pouvez trouver le fichier apk sur le site officiel de Supercell ou sur un site de confiance qui propose des fichiers apk.
-
Comment sauvegarder ma progression dans Clash of Clans ?
-
Vous pouvez sauvegarder votre progression dans Clash of Clans en connectant votre compte au Google Play, à Facebook, ou au Supercell ID. Vous pouvez ainsi récupérer votre progression sur n'importe quel appareil.
-
Comment contacter le service client de Clash of Clans ?
-
Vous pouvez contacter le service client de Clash of Clans en utilisant le formulaire de contact disponible dans les paramètres du jeu. Vous pouvez également consulter la FAQ du jeu ou le forum officiel pour trouver des réponses à vos questions.
-
Comment signaler un joueur qui triche ou qui se comporte mal dans Clash of Clans ?
-
Vous pouvez signaler un joueur qui triche ou qui se comporte mal dans Clash of Clans en utilisant l'option de signalement disponible dans le profil du joueur. Vous pouvez également bloquer ou signaler un joueur qui vous envoie des messages indésirables dans le chat du jeu.
-
-
Voilà, vous avez terminé de lire cet article sur Clash of Clans et la dernière mise à jour apk. Nous espérons que vous avez apprécié ce contenu et que vous avez appris des choses intéressantes. Si vous avez des questions, des commentaires, ou des suggestions, n'hésitez pas à nous les faire savoir. Nous serons ravis de vous répondre et de vous aider. Merci de votre attention et à bientôt !
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/FIFA Mobile Para Hilesi APK - Futbol Oyununda Snrsz Kaynaklara Sahip Olun.md b/spaces/1phancelerku/anime-remove-background/FIFA Mobile Para Hilesi APK - Futbol Oyununda Snrsz Kaynaklara Sahip Olun.md
deleted file mode 100644
index 8f3eacb840076ea68380188ffadd041cb9a47e5d..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/FIFA Mobile Para Hilesi APK - Futbol Oyununda Snrsz Kaynaklara Sahip Olun.md
+++ /dev/null
@@ -1,132 +0,0 @@
-
-
FIFA Mobile para hilesi apk: How to get unlimited coins and gems in FIFA Mobile
-
If you are a fan of soccer games, you have probably heard of FIFA Mobile, the mobile version of the popular FIFA franchise by EA Sports. FIFA Mobile is a free-to-play game that lets you build your ultimate team of soccer stars, compete in various modes and events, and experience realistic soccer simulation on your device. But as with most free-to-play games, FIFA Mobile also has a currency system that limits your progress and enjoyment. Coins and gems are the main currencies in FIFA Mobile, and you need them to buy players, packs, upgrades, and more. However, earning coins and gems can be slow and tedious, especially if you want to get the best players and items in the game.
-
That's why some players resort to using cheat tools like para hilesi apk, which claims to give you unlimited coins and gems in FIFA Mobile. But what is para hilesi apk, how does it work, and is it safe to use? In this article, we will answer these questions and more, as well as provide you with a step-by-step guide on how to download, install, and use para hilesi apk on your device. Read on to find out more.
-
What is FIFA Mobile and why is it so popular?
-
FIFA Mobile is a soccer game developed by EA Sports for iOS and Android devices. It is based on the FIFA series of games, which are known for their realistic graphics, gameplay, and licenses. FIFA Mobile features over 15,000 authentic soccer players from over 600 teams across 30+ leagues, including the Premier League, La Liga, Bundesliga, Serie A, Ligue 1, MLS, and more. You can also play with national teams from the FIFA World Cup 2022™ mode, which lets you replay the official tournament brackets with any of the 32 qualified nations.
-
FIFA Mobile features and gameplay
-
FIFA Mobile has several features that make it an immersive and engaging soccer game for mobile devices. Some of these features are:
-
-
A brand new engine with all new graphics and gameplay
-
New player likeness, custom run styles and celebrations
-
Chemistry system that boosts your team performance based on player relationships
-
VIP Program with special privileges for loyal players
-
Advanced Passing system that lets you open up space and create more attacking opportunities
-
Live events that correspond with the real-world tournaments throughout the soccer season
-
Icons and Heroes that let you build a team of soccer legends from different eras
-
-
The gameplay of FIFA Mobile is simple and intuitive. You can control your players using a virtual joystick on the left side of the screen, and use buttons on the right side to sprint, skill, pass, shoot, tackle, or switch players. You can also use swipe gestures to aim your shots or passes more precisely. The game also has an auto-play option that lets the AI control your players for you.
-
FIFA Mobile modes and events
-
FIFA Mobile has several modes and events that let you compete against other players or the AI in different scenarios. Some of these modes and events are:
-
-
Attack Mode: A turn-based mode where you and your opponent take turns to score goals within a set time limit
-
Head to Head: A real-time mode where you and your opponent play a full 90-minute match with 11v11 gameplay
-
Season: A mode where you play a series of matches against teams from different leagues and divisions
-
Campaign: A mode where you complete various challenges and objectives to earn rewards and unlock new players
-
Events: Special modes that are based on real-world soccer tournaments, such as the UEFA Champions League, the FIFA World Cup, the Copa America, and more
-
Squad Building Challenges: A mode where you create a team with specific requirements and earn rewards for completing them
-
Team of the Week: A mode where you can play against the best players of the week from different leagues and earn their cards
-
-
FIFA Mobile also has a social aspect, where you can join a league with other players and chat, compete, and cooperate with them. You can also participate in league tournaments, league vs league matches, and league survival events.
-
What is para hilesi apk and how does it work?
-
Para hilesi apk is a cheat tool that claims to give you unlimited coins and gems in FIFA Mobile. It is an application that you can download and install on your device, and use it to modify the game data and resources. Para hilesi apk is not an official product of EA Sports or FIFA Mobile, and it is not endorsed or supported by them. It is a third-party tool that is created by unknown developers who may have malicious intentions.
-
Para hilesi apk features and benefits
-
Para hilesi apk promises to give you several benefits that can enhance your FIFA Mobile experience. Some of these benefits are:
-
fifa mobile mod apk unlimited money
-fifa mobile hack apk download
-fifa mobile 2022 apk para hilesi
-fifa mobile apk indir ücretsiz
-fifa mobile son sürüm apk hile
-fifa mobile android oyun club apk
-fifa mobile 18 mod apk para hilesi
-fifa mobile 21 apk hileli indir
-fifa mobile apk mod menu
-fifa mobile apk full sınırsız para
-fifa mobile apk hile nasıl yapılır
-fifa mobile apk güncel hile
-fifa mobile apk mega hileli
-fifa mobile apk vip hile
-fifa mobile apk altın hilesi
-fifa mobile apk elmas hilesi
-fifa mobile apk oyuncu hilesi
-fifa mobile apk transfer hilesi
-fifa mobile apk antrenman hilesi
-fifa mobile apk enerji hilesi
-fifa mobile apk online hile
-fifa mobile apk offline hile
-fifa mobile apk no root hile
-fifa mobile apk yeni hileler
-fifa mobile apk kolay hile yapma
-fifa mobile mod apk son sürüm indir
-fifa mobile mod apk android 1
-fifa mobile mod apk revdl
-fifa mobile mod apk rexdl
-fifa mobile mod apk happymod
-fifa mobile mod apk an1.com
-fifa mobile mod apk unlimited coins and points
-fifa mobile mod apk all players unlocked
-fifa mobile mod apk latest version 2022
-fifa mobile mod apk free download for android
-fifa mobile mod apk no verification required
-fifa mobile mod apk no ban risk
-fifa mobile mod apk anti ban protection
-fifa mobile mod apk cheat engine enabled
-fifa mobile mod apk gameplay video proof
-
-
Unlimited coins and gems: You can get as many coins and gems as you want, without spending any real money or time. You can use them to buy players, packs, upgrades, and more.
Unlimited stamina: You can play as many matches as you want, without waiting for your stamina to refill.
Unlimited energy: You can participate in as many events as you want, without worrying about running out of energy.
Unlimited VIP points: You can access the VIP Program and enjoy its perks, such as exclusive players, packs, rewards, and more.
No ads: You can play the game without any interruptions or distractions from ads.
-
-
Para hilesi apk risks and drawbacks
-
However, para hilesi apk also comes with several risks and drawbacks that can ruin your FIFA Mobile experience. Some of these risks and drawbacks are:
-
-
Ban risk: Using para hilesi apk is against the terms of service of FIFA Mobile, and it can be detected by the game's anti-cheat system. If you are caught using para hilesi apk, you may face consequences such as account suspension or deletion, loss of progress and items, or legal action.
Virus risk: Downloading para hilesi apk from unknown sources can expose your device to viruses, malware, spyware, or other harmful software. These can damage your device, steal your personal information, or compromise your security.
Compatibility risk: Para hilesi apk may not work properly with the latest version of FIFA Mobile, or with different devices or operating systems. It may cause errors, glitches, crashes, or performance issues that can affect your gameplay.
Quality risk: Using para hilesi apk may reduce the quality of your gameplay, as it may make the game too easy or boring. It may also take away the fun and challenge of earning coins and gems legitimately, or competing with other players fairly.
-
-
How to download and install para hilesi apk on your device?
-
If you still want to try para hilesi apk despite its risks and drawbacks, you will need to follow some steps to download and install it on your device. However, we do not recommend doing so, as it may harm your device or your account. Use para hilesi apk at your own risk.
-
Step-by-step guide for Android users
-
If you are using an Android device, here are the steps to download and install para hilesi apk:
-
-
Go to the settings of your device and enable the option to install apps from unknown sources.
-
Go to a website that offers para hilesi apk download link. Make sure it is a reliable and trustworthy source.
-
Click on the download button and wait for the file to be downloaded on your device.
-
Locate the file in your device's file manager and tap on it to start the installation process.
-
Follow the instructions on the screen and grant the necessary permissions to para hilesi apk.
-
Once the installation is complete, you can launch para hilesi apk from your device's app drawer or home screen.
-
-
Step-by-step guide for iOS users
-
If you are using an iOS device, here are the steps to download and install para hilesi apk:
-
-
Go to the settings of your device and trust the profile of para hilesi apk. You may need to enter your device's passcode to do so.
-
Go to a website that offers para hilesi apk download link. Make sure it is a reliable and trustworthy source.
-
Click on the download button and wait for the file to be downloaded on your device.
-
Locate the file in your device's file manager and tap on it to start the installation process.
-
Follow the instructions on the screen and grant the necessary permissions to para hilesi apk.
-
Once the installation is complete, you can launch para hilesi apk from your device's app drawer or home screen.
-
-
How to use para hilesi apk to get unlimited coins and gems in FIFA Mobile?
-
After you have downloaded and installed para hilesi apk on your device, you can use it to get unlimited coins and gems in FIFA Mobile. Here are some tips and tricks for using para hilesi apk effectively:
-
-
Make sure you have a stable internet connection and enough storage space on your device.
-
Make sure you have the latest version of FIFA Mobile installed on your device.
-
Make sure you have a backup of your FIFA Mobile account and data, in case something goes wrong or you get banned.
-
Launch para hilesi apk and enter your FIFA Mobile username or email address.
-
Select the amount of coins and gems you want to generate. You can also choose other options such as stamina, energy, VIP points, or no ads.
-
Click on the generate button and wait for the process to complete. You may need to verify that you are not a robot by completing a captcha or a survey.
-
Once the process is done, you can close para hilesi apk and open FIFA Mobile. You should see your coins and gems added to your account.
-
-
Alternatives to para hilesi apk
-
If you are looking for alternatives to para hilesi apk, there are some other ways to get coins and gems in FIFA Mobile without cheating. Some of these ways are:
-
-
Playing matches and events: You can earn coins and gems by playing different modes and events in FIFA Mobile, such as Attack Mode, Head to Head, Season, Campaign, Events, Squad Building Challenges, Team of the Week, etc. You can also get bonus coins and gems by completing daily and weekly objectives, achievements, milestones, etc.
Buying packs and offers: You can buy coins and gems with real money by purchasing packs and offers in FIFA Mobile. There are different types of packs and offers available, such as player packs, icon packs, hero packs, event packs, special packs, etc. You can also get discounts and deals by checking the store regularly.
Selling players and items: You can sell your unwanted players and items in FIFA Mobile by using the market or the quick sell option. You can get coins by selling your players or items to other players or to the game. You can also get gems by selling some rare or special players or items.
Joining a league: You can join a league with other players in FIFA Mobile and benefit from their help and support. You can get coins and gems by participating in league tournaments, league vs league matches, league survival events, etc. You can also get rewards by contributing to your league's achievements.
-
-
Conclusion
-
Summary of the main points
-
In this article, we have discussed FIFA Mobile para hilesi apk, a cheat tool that claims to give you unlimited coins and gems in FIFA Mobile. We have explained what FIFA Mobile is and why it is so popular, what para hilesi apk is and how it works, how to download and install para hilesi apk on your device, how to use para hilesi apk to get unlimited coins and gems in FIFA Mobile, and some alternatives to para hilesi apk. We have also highlighted some of the risks and drawbacks of using para hilesi apk, such as ban risk, virus risk, compatibility risk, quality risk, etc.
-
Call to action and disclaimer
-
If you want to try para hilesi apk for yourself, you can follow the steps we have provided above. However, we do not recommend doing so, as it may harm your device or your account. Use para hilesi apk at your own risk. We are not responsible for any damage or loss that may occur from using para hilesi apk.
-
Alternatively, you can play FIFA Mobile the legit way and enjoy the game without cheating. You can earn coins and gems by playing matches and events, buying packs and offers, selling players and items, joining a league, etc. You can also improve your skills and strategies by learning from other players, watching tutorials, reading guides, etc. You can have fun and satisfaction by building your ultimate team of soccer stars, competing in various modes and events, and experiencing realistic soccer simulation on your device.
-
Whatever you choose to do, we hope you have a great time playing FIFA Mobile. Thank you for reading this article.
-
FAQs
-
Here are some frequently asked questions about FIFA Mobile para hilesi apk:
-
-
Q: Is para hilesi apk free to use? A: Yes, para hilesi apk is free to use. However, you may need to complete some verification steps before you can use it, such as completing a captcha or a survey.
-
Q: Is para hilesi apk safe to use? A: No, para hilesi apk is not safe to use. It is a cheat tool that violates the terms of service of FIFA Mobile, and it can be detected by the game's anti-cheat system. It can also expose your device to viruses, malware, spyware, or other harmful software. It can also cause errors, glitches, crashes, or performance issues that can affect your gameplay.
-
Q: Can I use para hilesi apk on any device or operating system? A: No, para hilesi apk may not work properly on any device or operating system. It may be incompatible with the latest version of FIFA Mobile, or with different devices or operating systems. It may also require some settings or permissions that may not be available on your device or operating system.
-
Q: Can I use para hilesi apk with my existing FIFA Mobile account? A: Yes, you can use para hilesi apk with your existing FIFA Mobile account. However, you may risk losing your account or your progress if you are caught using para hilesi apk. You may also lose your items or rewards that you have earned legitimately in the game.
-
Q: Can I use para hilesi apk offline? A: No, you cannot use para hilesi apk offline. You need to have a stable internet connection and enough storage space on your device to use para hilesi apk. You also need to connect to the game's servers to generate coins and gems in FIFA Mobile.
-
-
\ No newline at end of file
diff --git a/spaces/AIGText/GlyphControl/cldm/hack.py b/spaces/AIGText/GlyphControl/cldm/hack.py
deleted file mode 100644
index 454361e9d036cd1a6a79122c2fd16b489e4767b1..0000000000000000000000000000000000000000
--- a/spaces/AIGText/GlyphControl/cldm/hack.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import torch
-import einops
-
-import ldm.modules.encoders.modules
-import ldm.modules.attention
-
-from transformers import logging
-from ldm.modules.attention import default
-
-
-def disable_verbosity():
- logging.set_verbosity_error()
- print('logging improved.')
- return
-
-
-def enable_sliced_attention():
- ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attention_forward
- print('Enabled sliced_attention.')
- return
-
-
-def hack_everything(clip_skip=0):
- disable_verbosity()
- ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward
- ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip
- print('Enabled clip hacks.')
- return
-
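-# Typical usage (a minimal sketch, assuming `cldm` is importable from the caller):
-# apply these hooks once at start-up, before any model is instantiated, e.g.
-#
-#   from cldm.hack import disable_verbosity, hack_everything
-#   disable_verbosity()
-#   hack_everything(clip_skip=2)  # clip_skip > 1 reads an earlier CLIP hidden layer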
-
-# Written by Lvmin
-def _hacked_clip_forward(self, text):
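- # Work around CLIP's 77-token context limit: tokenize without special tokens,
- # split the result into three 75-token chunks, wrap each chunk with BOS/EOS and
- # pad it to 77 tokens, encode all chunks in one batch, then concatenate the three
- # embeddings along the sequence dimension (the returned context is 3 * 77 tokens).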
- PAD = self.tokenizer.pad_token_id
- EOS = self.tokenizer.eos_token_id
- BOS = self.tokenizer.bos_token_id
-
- def tokenize(t):
- return self.tokenizer(t, truncation=False, add_special_tokens=False)["input_ids"]
-
- def transformer_encode(t):
- if self.clip_skip > 1:
- rt = self.transformer(input_ids=t, output_hidden_states=True)
- return self.transformer.text_model.final_layer_norm(rt.hidden_states[-self.clip_skip])
- else:
- return self.transformer(input_ids=t, output_hidden_states=False).last_hidden_state
-
- def split(x):
- return x[75 * 0: 75 * 1], x[75 * 1: 75 * 2], x[75 * 2: 75 * 3]
-
- def pad(x, p, i):
- return x[:i] if len(x) >= i else x + [p] * (i - len(x))
-
- raw_tokens_list = tokenize(text)
- tokens_list = []
-
- for raw_tokens in raw_tokens_list:
- raw_tokens_123 = split(raw_tokens)
- raw_tokens_123 = [[BOS] + raw_tokens_i + [EOS] for raw_tokens_i in raw_tokens_123]
- raw_tokens_123 = [pad(raw_tokens_i, PAD, 77) for raw_tokens_i in raw_tokens_123]
- tokens_list.append(raw_tokens_123)
-
- tokens_list = torch.IntTensor(tokens_list).to(self.device)
-
- feed = einops.rearrange(tokens_list, 'b f i -> (b f) i')
- y = transformer_encode(feed)
- z = einops.rearrange(y, '(b f) i c -> b (f i) c', f=3)
-
- return z
-
-
-# Stolen from https://github.com/basujindal/stable-diffusion/blob/main/optimizedSD/splitAttention.py
-def _hacked_sliced_attention_forward(self, x, context=None, mask=None):
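- # Memory-saving variant of CrossAttention.forward: q/k/v are split along the
- # (batch * heads) dimension and attention is computed one slice at a time,
- # writing each partial result into a preallocated buffer instead of holding
- # the full attention matrix for every head in memory at once.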
- h = self.heads
-
- q = self.to_q(x)
- context = default(context, x)
- k = self.to_k(context)
- v = self.to_v(context)
- del context, x
-
- q, k, v = map(lambda t: einops.rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
-
- limit = k.shape[0]
- att_step = 1
- q_chunks = list(torch.tensor_split(q, limit // att_step, dim=0))
- k_chunks = list(torch.tensor_split(k, limit // att_step, dim=0))
- v_chunks = list(torch.tensor_split(v, limit // att_step, dim=0))
-
- q_chunks.reverse()
- k_chunks.reverse()
- v_chunks.reverse()
- sim = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device)
- del k, q, v
- for i in range(0, limit, att_step):
- q_buffer = q_chunks.pop()
- k_buffer = k_chunks.pop()
- v_buffer = v_chunks.pop()
- sim_buffer = torch.einsum('b i d, b j d -> b i j', q_buffer, k_buffer) * self.scale
-
- del k_buffer, q_buffer
- # attention, what we cannot get enough of, by chunks
-
- sim_buffer = sim_buffer.softmax(dim=-1)
-
- sim_buffer = torch.einsum('b i j, b j d -> b i d', sim_buffer, v_buffer)
- del v_buffer
- sim[i:i + att_step, :, :] = sim_buffer
-
- del sim_buffer
- sim = einops.rearrange(sim, '(b h) n d -> b n (h d)', h=h)
- return self.to_out(sim)
diff --git a/spaces/AIWaves/SOP_Generation-single/single_prompts.py b/spaces/AIWaves/SOP_Generation-single/single_prompts.py
deleted file mode 100644
index ce755df45b5ed84984566a818df5846d2f2e62a9..0000000000000000000000000000000000000000
--- a/spaces/AIWaves/SOP_Generation-single/single_prompts.py
+++ /dev/null
@@ -1,396 +0,0 @@
-def get_design_state_system_prompt(index):
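- # Assemble the few-shot system prompt used to design SOP states for a character.
- # `index` (0-5) selects which worked example is embedded in the prompt
- # (design assistant, tutor, medical/legal/financial consultant, tour guide);
- # any other value falls back to the eye-care customer-service example.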
- default = """input:
-You are an online eye care customer service representative, and your task is to answer patients' questions about ophthalmic diseases and guide them to visit the hospital for examinations while assisting them in filling out the necessary forms.
-
-output:
-online eye care customer service
-
-
-knowledge_response_state
-Guide the user to go to the hospital for an examination and answer questions related to my hospital.
-Your language should be concise and avoid excessive words. You need to guide me repeatedly. When the user explicitly refuses to visit the hospital, inquire about their concerns and encourage them to come for consultation, such as: \"Do you have any concerns?\" or \"Our hospital has highly professional doctors who you can discuss with in person.\" When the user expresses doubts with responses like \"I'll think about it,\" \"I'll consider it,\" or \"I need to see more,\" introduce the advantages of the hospital and guide them to come for consultation. Remember, after responding to me, guide me to visit your hospital for an examination.
-If the patient agrees to go to the hospital,the state should be end and move to next state,output1,else if the state should not be end,output 0\n
-
-
- knowledge_response_book_card_state
-Guide patient to fill out appointment cards and answer hospital-related questions
-Your language should be as concise as possible, without too much nonsense. The copy of the invitation card is: Please copy and fill in the following information and send it to me to complete the reservation. \n[Name]:\n[Telephone]:\n[Your approximate location]: District Degree]: \n The preoperative examination process includes mydriasis. After mydriasis, your vision will be blurred for 4-6 hours, which affects driving safety, so please do not drive to the hospital by yourself, and arrange your personal itinerary after the examination. You need to repeatedly invite users to fill out invitation cards. When users are chatting, euphemistic replies guide users to fill in the appointment card, such as: \"I can't provide detailed information about your question. If you need to go to the hospital for eye consultation, I can make an appointment for you.\" When users have concerns, such as: Users reply with \"I want to think about it,\" \"I'll think about it,\" \"I want to see it again,\" etc., introducing the hospital's advantages and guiding users to fill in the appointment card. If the user does not fill in the phone number completely, the user will be reminded to add the phone number.
-If the patient fills in the phone information in the appointment card, for example: when the patient answers [Telephone]: 15563665210, the state should be end and move to next state,output1,\nelse if the patient does not fill in completely or the format is wrong, output 0\n
-"""
-
- design_assistant = """input:
-An assistant that can help users create content such as articles, blogs, advertising copy, etc
-output:
-Intelligent and versatile content creation assistant
-
-
-
-Discussion state
-Engage in a detailed discussion with the user to understand their specific requirements, target audience, and desired tone.
-Ask probing questions to gain a deeper understanding of the user's vision and objectives for the content. Listen actively and take notes to ensure all requirements are captured accurately. Provide suggestions and insights based on previous experience to enhance the user's content ideas.
-If the user's requirements are clear and all necessary information has been gathered, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Research state
-Conduct extensive research on the given topic to gather information from reliable sources and identify unique angles.
-Explore various credible sources such as academic journals, reputable websites, and industry reports. Analyze existing content to understand the current landscape and identify gaps or opportunities for a fresh perspective. Take thorough notes and organize the collected information for easy reference.
-If sufficient research has been conducted and the necessary information has been gathered, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Outline state
-Create a logical structure for the content, including main points, subheadings, and supporting arguments.
-Organize the collected information into a cohesive outline that follows a logical flow. Ensure that the structure aligns with the user's objectives and target audience. Use headings and subheadings to provide a clear roadmap for the content.
-If the outline has been created and approved by the user, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Drafting state
-Write the content, paying attention to grammar, spelling, and punctuation.
-Craft engaging introductions that grab the reader's attention. Develop informative body paragraphs that provide valuable insights and supporting evidence. Create compelling conclusions that leave a lasting impression. Use creativity and writing skills to make the content engaging and enjoyable to read.
-If the initial draft has been completed, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Revision state
-Seek feedback from the user and incorporate necessary revisions.
-Maintain open communication with the user throughout the writing process. Actively seek feedback and suggestions for improvement. Incorporate revisions based on the user's preferences and ensure that the content aligns with their expectations. Collaborate with the user to create a final version that meets their requirements.
-If the user is satisfied with the content and no further revisions are needed, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Proofreading state
-Thoroughly review the content for grammar, spelling, and coherence.
-Check for any errors in grammar, spelling, and punctuation. Ensure that the content flows smoothly and cohesively. Make necessary edits to improve clarity and readability. Pay attention to formatting and consistency throughout the document.
-If the content has been thoroughly proofread and edited, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Delivery state
-Deliver the completed content to the user within the agreed-upon timeframe and desired format.
-Ensure that the content is delivered in the format specified by the user, such as a Word document, a blog post, or any other specified medium. Meet the agreed-upon deadline for content delivery. Provide the user with a final version that is polished, error-free, and ready for use.
-If the content has been delivered to the user, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
- """
-
- tutor = """input:
-A tutor who provides personalized learning resources for students to help them understand complex concepts and problems
-output:
-Tutor
-
-
-
-Assessment_state
-Conduct a comprehensive assessment of the student's knowledge and understanding of the subject matter.
-Use a variety of assessment tools such as quizzes, tests, and discussions to identify areas where the student may be struggling or require additional support. Tailor the assessment to the student's preferred learning style. Provide clear instructions and guidance throughout the assessment process.
-If the assessment is completed and areas of improvement are identified, the state should be end and move to the next state, output 1. If the assessment is not completed or the student needs further support, output 0.
-
-
-
-Personalized_learning_plan_state
-Create personalized learning plans for each student based on the assessment results.
-Consider the student's strengths, weaknesses, and preferred learning style when creating the learning plan. Include a variety of resources such as textbooks, online articles, videos, and interactive exercises. Ensure that the materials are engaging, relevant, and aligned with the student's curriculum.
-If the personalized learning plan is created and includes a variety of resources, the state should be end and move to the next state, output 1. If the learning plan is not created or lacks the necessary resources, output 0.
-
-
-
-Hands-on_learning_state
-Encourage students to actively participate in problem-solving activities and apply theoretical concepts to practical situations.
-Design practical exercises and real-life scenarios to help students develop critical thinking skills and a deeper understanding of the subject matter. Provide clear instructions and guidance throughout the hands-on learning activities. Use real-life examples to enhance understanding.
-If the hands-on learning activities are completed and the student demonstrates an application of theoretical concepts, the state should be end and move to the next state, output 1. If the activities are not completed or the student struggles to apply the concepts, output 0.
-
-
-
-Supportive_environment_state
-Maintain a supportive and encouraging environment during tutoring sessions.
-Explain complex concepts in a patient and understandable manner. Break down concepts into simpler terms and provide real-life examples. Actively listen to the student's questions and concerns. Create a safe space for the student to ask for clarification.
-If the tutoring session is conducted in a supportive and encouraging manner, the state should be end and move to the next state, output 1. If the session lacks support or the student feels uncomfortable asking for clarification, output 0.
-
-
-
-Progress_tracking_state
-Regularly assess the student's understanding and provide constructive feedback.
-Use quizzes, assignments, and discussions to assess the student's progress. Provide constructive feedback and identify areas for improvement. Help the student build confidence and overcome challenges.
-If the student's progress is regularly assessed and constructive feedback is provided, the state should be end and move to the next state, output 1. If the assessment and feedback are lacking or inconsistent, output 0.
-
-
-
-Study_habits_state
-Guide the student in developing effective study habits and time management skills.
-Assist the student in setting realistic goals and creating study schedules. Provide guidance on effective study techniques and strategies. Encourage the student to stay on track and make steady progress.
-If the student develops effective study habits and time management skills, the state should be end and move to the next state, output 1. If the student struggles to develop these skills or lacks progress, output 0.
-
-
-
-Mentorship_state
-Serve as a mentor and motivator for the student.
-Inspire the student to reach their full academic potential. Celebrate their achievements and encourage them to embrace a growth mindset. Foster a positive and empowering learning experience.
-If the student feels mentored and motivated, the state should be end and move to the next state, output 1. If the student lacks mentorship or motivation, output 0.
-
-
-
-Final_objective_state
-Help students gain a deep understanding of complex concepts and develop the skills and confidence to excel academically.
-Ensure that students grasp complex concepts and can apply them effectively. Help them build confidence in their abilities and develop a growth mindset. Support them in achieving their academic goals.
-This state is the final objective and should always be the end state, output 1.
-
- """
-
- online_medical_consultant = """input:
-An online medical consultant who offers preliminary medical advice to patients and answers common questions about diseases, symptoms, and treatments.
-output:
-Online Medical Consultant
-
-
-Initial Assessment State
-Gather detailed information about the patient's symptoms, medical history, and any previous treatments.
-Ask open-ended questions to allow the patient to provide a comprehensive description of their symptoms. Request specific details such as the duration and intensity of symptoms, any triggering factors, and any alleviating or worsening factors. Inquire about the patient's medical history, including any chronic conditions, previous surgeries, or allergies. Ask about any medications or treatments the patient has tried in the past.
-If the patient has provided sufficient information about their symptoms, medical history, and previous treatments, the state should be end and move to the next state. Output 1. Otherwise, output 0.
-
-
-
-Preliminary Diagnosis State
-Form a preliminary diagnosis based on the gathered information.
-Analyze the patient's symptoms, medical history, and any relevant test results. Consider possible differential diagnoses and evaluate the likelihood of each. Explain the reasoning behind the preliminary diagnosis to the patient, highlighting the key symptoms and findings that led to the conclusion.
-If the patient understands the preliminary diagnosis and is ready to discuss treatment options or further diagnostic tests, the state should be end and move to the next state. Output 1. Otherwise, output 0.
-
-
-
-Treatment Discussion State
-Discuss potential treatment options or further diagnostic tests.
-Present the patient with different treatment options, explaining the benefits, risks, and expected outcomes of each. Consider the patient's preferences, lifestyle, and any contraindications when recommending treatments. If further diagnostic tests are necessary, explain the purpose of these tests and how they can provide more information for a definitive diagnosis.
-If the patient has chosen a treatment option or agreed to undergo further diagnostic tests, the state should be end and move to the next state. Output 1. Otherwise, output 0.
-
-
-
-Patient Education State
-Provide clear and understandable explanations of medical concepts.
-Break down complex medical terms and concepts into simple language that the patient can easily understand. Use visual aids, diagrams, or analogies to enhance comprehension. Encourage the patient to ask questions and clarify any uncertainties they may have. Ensure that the patient has a comprehensive understanding of their condition, treatment options, and any potential risks or side effects.
-If the patient demonstrates a clear understanding of their condition, treatment options, and any necessary precautions, the state should be end and move to the next state. Output 1. Otherwise, output 0.
-
-
-
-Follow-up Instructions State
-Provide clear instructions for any necessary follow-up steps.
-Outline the specific actions the patient needs to take, such as scheduling further tests, booking a follow-up appointment, or seeking in-person medical care if required. Provide contact information for any questions or concerns that may arise. Emphasize the importance of adhering to the recommended follow-up plan and address any potential barriers or challenges the patient may face.
-If the patient acknowledges and understands the follow-up instructions, the state should be end and move to the next state. Output 1. Otherwise, output 0.
-"""
-
- online_legal_consultant = """input:
-An online legal advisor who can respond to inquiries related to legal matters, providing basic legal information and advice.
-output:
-Online Legal Advisor
-
-
-Active Listening State
-Listen attentively to clients' concerns and queries.
-1. Give clients your full attention and avoid interrupting them.
-2. Take notes to ensure accurate understanding of the details.
-3. Ask clarifying questions to gather additional information if needed.
-4. Show empathy and understanding towards clients' emotions and concerns.
-5. Avoid making assumptions or jumping to conclusions.
-If the client has fully expressed their concerns and queries, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Analysis State
-Analyze the legal situation based on the gathered information.
-1. Research relevant laws, regulations, and precedents related to the client's case.
-2. Consider any specific circumstances or factors that may impact the legal analysis.
-3. Consult legal databases, journals, and other reliable sources for accurate information.
-4. Take into account any recent legal developments or changes that may affect the case.
-5. Ensure that the legal advice provided is up-to-date and accurate.
-If the legal situation has been thoroughly analyzed, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Clear Communication State
-Communicate legal concepts in a clear and concise manner.
-1. Avoid using complex legal jargon that may confuse clients.
-2. Break down legal concepts into simple and understandable terms.
-3. Use examples or analogies to illustrate legal principles.
-4. Check for client understanding and address any questions or confusion.
-5. Provide written summaries or explanations if necessary.
-If the client has demonstrated understanding of the communicated legal concepts, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Comprehensive Information State
-Provide clients with comprehensive information about their legal rights, obligations, and potential outcomes.
-1. Explain the legal rights and obligations relevant to the client's case.
-2. Discuss potential outcomes or consequences of different legal actions.
-3. Provide information about alternative dispute resolution methods, if applicable.
-4. Offer resources or references for further research or information.
-5. Address any specific concerns or questions raised by the client.
-If the client has received comprehensive information and their questions have been addressed, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Practical Solutions State
-Offer practical solutions tailored to the client's specific circumstances.
-1. Consider the client's goals, resources, and potential risks.
-2. Present different options or strategies for resolving the legal matter.
-3. Discuss the pros and cons of each option and their potential outcomes.
-4. Provide guidance on the steps to take to implement the chosen solution.
-5. Address any concerns or doubts the client may have about the proposed solutions.
-If the client has agreed on a practical solution and is ready to proceed, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Timely Responses State
-Ensure prompt responses to inquiries and minimize unnecessary delays.
-1. Respond to client inquiries as soon as possible.
-2. Set clear expectations regarding response times.
-3. Inform clients of any potential delays or timeframes for further actions.
-4. Provide regular updates on the progress of the legal matter.
-5. Apologize and explain any delays that may occur, if necessary.
-If the client has received a timely response and is satisfied with the communication, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Building Trust and Rapport State
-Establish trust and rapport with clients.
-1. Maintain a professional and respectful demeanor.
-2. Show empathy and understanding towards clients' concerns.
-3. Demonstrate active listening and genuine interest in their case.
-4. Be transparent and honest about the legal process and potential outcomes.
-5. Foster open communication and encourage clients to ask questions or seek clarification.
-If the client feels comfortable discussing their legal concerns openly and trusts the advisor, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Referral State
-Refer clients to specialized experts when necessary.
-1. Recognize cases that require specialized expertise beyond the advisor's scope.
-2. Maintain a network of trusted colleagues or professionals in various legal areas.
-3. Explain the reasons for the referral and the benefits of seeking specialized assistance.
-4. Provide contact information or facilitate the connection with the referred expert.
-5. Follow up with the client to ensure a smooth transition to the specialized expert.
-If the client agrees to the referral and expresses willingness to seek specialized assistance, the state should be end and move to the next state, output 1. Otherwise, output 0.
-"""
-
- online_financial_advisor = """input:
-An online financial advisor who can analyze financial markets and data, offering investment advice and market forecasts to users.
-output:
-Online Financial Advisor
-
-
-Data Gathering State
-Gather relevant financial data from various reliable sources
-Ensure that the sources of financial data are reputable and up-to-date. Use a combination of primary and secondary sources, including market reports, economic indicators, and company financial statements. Verify the accuracy and reliability of the data before proceeding with the analysis.
-If all the relevant financial data has been gathered, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Data Analysis State
-Analyze the gathered financial data to identify investment opportunities and potential risks
-Utilize advanced analytical tools and models to conduct quantitative and qualitative analysis. Consider factors such as market volatility, industry performance, macroeconomic conditions, and company financial health. Pay attention to key indicators and trends that may impact investment decisions.
-If the analysis is complete and investment opportunities and risks have been identified, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-User Engagement State
-Engage in detailed discussions with users to understand their financial circumstances and objectives
-Ask relevant questions to gather information about the user's financial goals, risk tolerance, and investment preferences. Listen actively and empathetically to the user's responses. Tailor recommendations and forecasts to align with the user's specific needs.
-If the user's financial circumstances and objectives have been understood, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Market Monitoring State
-Monitor market trends and developments to identify potential investment opportunities
-Stay updated with industry conferences, financial publications, and online forums. Leverage the network of industry professionals to gain insights and validate analysis. Continuously track market indicators and news that may impact investment decisions.
-If potential investment opportunities have been identified based on market trends and developments, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Investment Recommendation State
-Formulate investment recommendations and market forecasts based on analysis
-Consider factors such as risk-reward ratios, potential catalysts, and long-term growth prospects. Present findings to users through comprehensive reports, charts, and interactive presentations. Ensure that the rationale behind recommendations is clearly communicated.
-If investment recommendations and market forecasts have been formulated, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Monitoring and Adjusting State
-Monitor the performance of recommended investments and adjust recommendations as needed
-Regularly review the performance of recommended investments and assess their alignment with user goals. Stay updated with market changes and adjust recommendations accordingly. Continuously communicate with users, addressing any concerns and providing ongoing support.
-If the performance of recommended investments has been monitored and adjustments have been made as needed, the state should be end and move to the next state, output 1. Otherwise, output 0.
-"""
- virtual_tour_guide = """input:
-A virtual tour guide providing destination information, travel recommendations, and virtual travel experiences for travelers.
-output:
-Virtual Tour Guide
-
-
-Research State
-Conduct in-depth research about the destination, including its history, culture, and attractions.
-Use reliable sources such as travel blogs, books, documentaries, and official tourism websites to gather accurate and up-to-date information. Take notes and organize the research material for easy reference during virtual tours. Pay special attention to lesser-known spots and off-the-beaten-path adventures to provide unique experiences to travelers.
-If the research is complete, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Personalization State
-Understand the traveler's preferences, interests, and desired experiences.
-Initiate a conversation with the traveler to gather information about their travel style, hobbies, and previous travel experiences. Ask specific questions about their desired landmarks or activities they wish to explore. Actively listen and take notes to create a personalized itinerary that caters to their unique tastes.
-If the traveler's preferences are gathered, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Curating Experience State
-Create a virtual travel experience that combines the destination's highlights with hidden gems.
-Select engaging and interactive elements such as quizzes, challenges, and virtual reality experiences to keep travelers entertained throughout the tour. Ensure a balance between well-known landmarks and lesser-known spots to provide a comprehensive and authentic experience. Pay attention to the pacing of the tour to maintain the traveler's interest.
-If the virtual travel experience is curated, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Communication State
-Maintain open and frequent communication with travelers.
-Provide travelers with all the necessary details about the virtual travel experience, including the required technology (e.g., VR headsets, video streaming platforms). Ensure they have access to the necessary resources to fully immerse themselves in the tour. Respond promptly to any inquiries or concerns they may have.
-If the communication is established, the state should be end and move to the next state, output 1. Otherwise, output 0.
-
-
-
-Feedback and Improvement State
-Encourage travelers to provide feedback and use it to enhance future tours.
-After each virtual travel experience, ask travelers for their feedback and suggestions. Value their opinions and use their input to improve the overall tour experience. Consider adjusting the pacing, adding more interactive elements, or exploring new destinations based on the feedback received.
-If feedback is received, the state should be end and move to the next state, output 1. Otherwise, output 0.
-"""
- if index == 0:
- example = design_assistant
- elif index == 1:
- example = tutor
- elif index == 2 :
- example = online_medical_consultant
- elif index == 3 :
- example = online_legal_consultant
- elif index == 4 :
- example = online_financial_advisor
- elif index == 5 :
- example = virtual_tour_guide
- else:
- example = default
-
- return """You are a master of character description, and your goal is to design several states for the character based on the provided character information. For each state, outline the character's tasks and the rules that can help them better accomplish these tasks, ultimately aiding them in achieving their final objective.
-input:{{the description of the target character}}
-output:
-{{the description of the role of the character}}
-
-
-{{the name of the state}}
-the task of the character in the current state
-the rules that can help the target character better accomplish their tasks in the current state
-{{when to leave this state for the next state. Must strictly follow the format of: If {{when to leave}}, the state should be end and move to the next state, output 1; else if the state should not be end, output 0}}
-
-
-For example:
-{}
-
-Note:
-1. Descriptions must be concise and clear.
-2. You must fill in enough detail to make the entire process reasonable rather than a bare outline.
-3. The above is just an example; you don't have to imitate it, and the content should be as different as possible while keeping the format correct.
-""".format(example)
-
-
-design_states_cot_system_prompt="""You are a character description master. Please rewrite the given character description into more reasonable expressions, enrich the character's details and behavioral logic to make their behavior more reasonable, help them design more steps to better complete their tasks in the current scenario (allowing the scene to proceed normally), and think carefully step by step!"""
-
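A minimal sketch of how the few-shot state-design prompt and `design_states_cot_system_prompt` above might be wired into a chat-style request. The builder name `build_state_design_prompt` is an assumption for illustration and is not taken from this repository:

```python
# Hypothetical wiring of the prompts above; names marked "assumed" are not from this repo.
def build_messages(character_description: str, example_index: int = 0) -> list:
    few_shot = build_state_design_prompt(example_index)  # assumed name of the prompt builder above
    return [
        # chain-of-thought system prompt defined above
        {"role": "system", "content": design_states_cot_system_prompt},
        # few-shot instructions plus the new character whose states should be designed
        {"role": "user", "content": few_shot},
        {"role": "user", "content": f"input:\n{character_description}\noutput:"},
    ]

messages = build_messages("A fitness coach helping clients build sustainable workout habits.")
```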
diff --git a/spaces/Abdul09/bingo_demo/README.md b/spaces/Abdul09/bingo_demo/README.md
deleted file mode 100644
index 14eb69ca2c5db9ef36af76536a6cf4cca390fcb8..0000000000000000000000000000000000000000
--- a/spaces/Abdul09/bingo_demo/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Bingo_demo
-emoji: 🔥
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.0.12
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Abdul09/bingo_demo/app.py b/spaces/Abdul09/bingo_demo/app.py
deleted file mode 100644
index e3f83a547ee1851b83b86680fc33890e9d1d3337..0000000000000000000000000000000000000000
--- a/spaces/Abdul09/bingo_demo/app.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import gradio as gr
-import numpy as np
-import tensorflow as tf
-import keras
-
-from keras.models import load_model
-from keras.preprocessing import image
-
-cnn_model = keras.models.load_model("fine_tuning.keras")
-
-def detect(img):
- img = img.reshape(-1,180,180,3)
- prediction = np.around(cnn_model.predict(img)[0], decimals=0)[0]
-
- if prediction == 1:
- return "Pneumonia Detected!"
-
- return "Pneumonia Not Detected!"
-
-#set the user uploaded image as the input array
-#match same shape as the input shape in the model
-
-image_input = gr.inputs.Image( shape=(180, 180) ,invert_colors=False , type="numpy" )
-
-title = "PneumoDetect: Pneumonia Detection from Chest X-Rays"
-
-
-#setup the interface
-iface = gr.Interface(
- fn = detect,
- inputs = image_input,
- outputs = gr.outputs.Label(),
-)
-iface.launch(share=True , debug = True )
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/index.ts b/spaces/AgentVerse/agentVerse/ui/src/index.ts
deleted file mode 100644
index abae0ffb053f53e535759e8159bf87b55d9467a2..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/index.ts
+++ /dev/null
@@ -1,79 +0,0 @@
-import { Game, Scale, Types, WEBGL } from "phaser";
-
-import { TownScene, LoadingScene } from "./scenes";
-import UIPlugin from "./phaser3-rex-plugins/templates/ui/ui-plugin";
-import BoardPlugin from "./phaser3-rex-plugins/plugins/board-plugin";
-
-declare global {
- interface Window {
- sizeChanged: () => void;
- game: Game;
- }
-}
-
-export const gameConfig: Types.Core.GameConfig = {
- title: "Phaser game tutorial",
- type: WEBGL,
- parent: "game",
- // backgroundColor: '#351f1b',
- scale: {
- mode: Scale.ScaleModes.NONE,
- width: window.innerWidth,
- height: window.innerHeight,
- },
- physics: {
- default: "arcade",
- arcade: {
- debug: false,
- },
- },
- render: {
- antialiasGL: false,
- pixelArt: true,
- },
- callbacks: {
- postBoot: () => {
- window.sizeChanged();
- },
- },
- canvasStyle: `display: block; width: 100%; height: 100%;`,
- autoFocus: true,
- audio: {
- disableWebAudio: false,
- },
- scene: [LoadingScene, TownScene],
- dom: {
- createContainer: true,
- },
- plugins: {
- scene: [
- {
- key: "rexUI",
- plugin: UIPlugin,
- mapping: "rexUI",
- },
- {
- key: "rexBoard",
- plugin: BoardPlugin,
- mapping: "rexBoard",
- },
- ],
- },
-};
-
-window.sizeChanged = () => {
- if (window.game.isBooted) {
- setTimeout(() => {
- window.game.scale.resize(window.innerWidth, window.innerHeight);
-
- window.game.canvas.setAttribute(
- "style",
- `display: block; width: ${window.innerWidth}px; height: ${window.innerHeight}px;`
- );
- }, 100);
- }
-};
-
-window.onresize = () => window.sizeChanged();
-
-window.game = new Game(gameConfig);
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/colorreplacepipeline.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/colorreplacepipeline.js
deleted file mode 100644
index 9f770a18f04f75c296ac9d801f3f9df5cbda23de..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/colorreplacepipeline.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import ColorReplacePostFxPipeline from './shaders/colorreplace/ColorReplacePostFxPipeline.js';
-export default ColorReplacePostFxPipeline;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorcomponents/Factory.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorcomponents/Factory.d.ts
deleted file mode 100644
index 422b94cb2ab57cc7f7d2b8e051d837528b0d8c55..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorcomponents/Factory.d.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-import ColorComponents from './ColorComponents';
-
-export default function (
- config?: ColorComponents.IConfig
-): ColorComponents;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorpicker/Factory.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorpicker/Factory.d.ts
deleted file mode 100644
index a6c15e53d933125b0d5570f3e75765a7f2e58884..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorpicker/Factory.d.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-import ColorPicker from './ColorPicker';
-
-export default function (
- config?: ColorPicker.IConfig
-): ColorPicker;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetExpandedChildWidth.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetExpandedChildWidth.js
deleted file mode 100644
index 73ab9a8a7157b497adabb7837c123c0713945d2f..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetExpandedChildWidth.js
+++ /dev/null
@@ -1,11 +0,0 @@
-var GetExpandedChildWidth = function (child, colWidth) {
- var childWidth;
- var childConfig = child.rexSizer;
- if (childConfig.expand) {
- var padding = childConfig.padding;
- childWidth = colWidth - padding.left - padding.right;
- }
- return childWidth;
-}
-
-export default GetExpandedChildWidth;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.d.ts
deleted file mode 100644
index 18719eb2ae5e0b05540eb70c1a137aec4bb68640..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.d.ts
+++ /dev/null
@@ -1,16 +0,0 @@
-import RoundRectangle from './RoundRectangle';
-
-export default function (
- x: number,
- y: number,
- width: number,
- height: number,
- radiusConfig?: number | ({ x?: number, y?: number }) | RoundRectangle.IRadiusConfig |
- ({
- radius?: (number | ({ x?: number, y?: number }) | RoundRectangle.IRadiusConfig),
- iteration?: number
- }),
- fillColor?: number,
- fillAlpha?: number
-
-): RoundRectangle;
\ No newline at end of file
diff --git a/spaces/Aki004/herta-so-vits/wav_upload.py b/spaces/Aki004/herta-so-vits/wav_upload.py
deleted file mode 100644
index 1a347fa9359edc21dcd9fe633579bc657c0a3fd4..0000000000000000000000000000000000000000
--- a/spaces/Aki004/herta-so-vits/wav_upload.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from google.colab import files
-import shutil
-import os
-import argparse
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--type", type=str, required=True, help="type of file to upload")
- args = parser.parse_args()
- file_type = args.type
-
- basepath = os.getcwd()
- uploaded = files.upload()
- assert(file_type in ['zip', 'audio'])
- if file_type == "zip":
- upload_path = "./upload/"
- for filename in uploaded.keys():
- shutil.move(os.path.join(basepath, filename), os.path.join(upload_path, "userzip.zip"))
- elif file_type == "audio":
- upload_path = "./raw/"
- for filename in uploaded.keys():
- shutil.move(os.path.join(basepath, filename), os.path.join(upload_path, filename))
\ No newline at end of file
diff --git a/spaces/AlekseyKorshuk/accompaniment-generator/README.md b/spaces/AlekseyKorshuk/accompaniment-generator/README.md
deleted file mode 100644
index 619d47d73ad0754c1f1ce7ca3e84d0c9fbb8a339..0000000000000000000000000000000000000000
--- a/spaces/AlekseyKorshuk/accompaniment-generator/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Accompaniment Generator
-emoji: 🎶
-colorFrom: green
-colorTo: green
-sdk: streamlit
-sdk_version: 1.2.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py
deleted file mode 100644
index b3f879a6c573871ea17b2bf158173aadf14457b6..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py
+++ /dev/null
@@ -1,73 +0,0 @@
-_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-albu_train_transforms = [
- dict(
- type='ShiftScaleRotate',
- shift_limit=0.0625,
- scale_limit=0.0,
- rotate_limit=0,
- interpolation=1,
- p=0.5),
- dict(
- type='RandomBrightnessContrast',
- brightness_limit=[0.1, 0.3],
- contrast_limit=[0.1, 0.3],
- p=0.2),
- dict(
- type='OneOf',
- transforms=[
- dict(
- type='RGBShift',
- r_shift_limit=10,
- g_shift_limit=10,
- b_shift_limit=10,
- p=1.0),
- dict(
- type='HueSaturationValue',
- hue_shift_limit=20,
- sat_shift_limit=30,
- val_shift_limit=20,
- p=1.0)
- ],
- p=0.1),
- dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
- dict(type='ChannelShuffle', p=0.1),
- dict(
- type='OneOf',
- transforms=[
- dict(type='Blur', blur_limit=3, p=1.0),
- dict(type='MedianBlur', blur_limit=3, p=1.0)
- ],
- p=0.1),
-]
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
- dict(type='Pad', size_divisor=32),
- dict(
- type='Albu',
- transforms=albu_train_transforms,
- bbox_params=dict(
- type='BboxParams',
- format='pascal_voc',
- label_fields=['gt_labels'],
- min_visibility=0.0,
- filter_lost_elements=True),
- keymap={
- 'img': 'image',
- 'gt_masks': 'masks',
- 'gt_bboxes': 'bboxes'
- },
- update_pad_shape=False,
- skip_img_without_anno=True),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='DefaultFormatBundle'),
- dict(
- type='Collect',
- keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'],
- meta_keys=('filename', 'ori_shape', 'img_shape', 'img_norm_cfg',
- 'pad_shape', 'scale_factor'))
-]
-data = dict(train=dict(pipeline=train_pipeline))
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py
deleted file mode 100644
index eac05a64a22f28d597eb4c8b1c31351b52829056..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py
+++ /dev/null
@@ -1,13 +0,0 @@
-_base_ = './retinanet_r50_fpn_2x_coco.py'
-model = dict(
- pretrained='open-mmlab://resnext101_64x4d',
- backbone=dict(
- type='ResNeXt',
- depth=101,
- groups=64,
- base_width=4,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- style='pytorch'))
diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/dist_util.py b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/dist_util.py
deleted file mode 100644
index 7acb48bfbb7ffefc039ce252d7c8342d770da0f2..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/dist_util.py
+++ /dev/null
@@ -1,93 +0,0 @@
-"""
-Helpers for distributed training.
-"""
-
-import io
-import os
-import socket
-
-import blobfile as bf
-from mpi4py import MPI
-import torch as th
-import torch.distributed as dist
-
-# Change this to reflect your cluster layout.
-# The GPU for a given rank is (rank % GPUS_PER_NODE).
-GPUS_PER_NODE = 8
-
-SETUP_RETRY_COUNT = 3
-
-
-def setup_dist():
- """
- Setup a distributed process group.
- """
- if dist.is_initialized():
- return
- os.environ["CUDA_VISIBLE_DEVICES"] = f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"
-
- comm = MPI.COMM_WORLD
- backend = "gloo" if not th.cuda.is_available() else "nccl"
-
- if backend == "gloo":
- hostname = "localhost"
- else:
- hostname = socket.gethostbyname(socket.getfqdn())
- os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
- os.environ["RANK"] = str(comm.rank)
- os.environ["WORLD_SIZE"] = str(comm.size)
-
- port = comm.bcast(_find_free_port(), root=0)
- os.environ["MASTER_PORT"] = str(port)
- dist.init_process_group(backend=backend, init_method="env://")
-
-
-def dev():
- """
- Get the device to use for torch.distributed.
- """
- if th.cuda.is_available():
- return th.device(f"cuda")
- return th.device("cpu")
-
-
-def load_state_dict(path, **kwargs):
- """
- Load a PyTorch file without redundant fetches across MPI ranks.
- """
- chunk_size = 2 ** 30 # MPI has a relatively small size limit
- if MPI.COMM_WORLD.Get_rank() == 0:
- with bf.BlobFile(path, "rb") as f:
- data = f.read()
- num_chunks = len(data) // chunk_size
- if len(data) % chunk_size:
- num_chunks += 1
- MPI.COMM_WORLD.bcast(num_chunks)
- for i in range(0, len(data), chunk_size):
- MPI.COMM_WORLD.bcast(data[i : i + chunk_size])
- else:
- num_chunks = MPI.COMM_WORLD.bcast(None)
- data = bytes()
- for _ in range(num_chunks):
- data += MPI.COMM_WORLD.bcast(None)
-
- return th.load(io.BytesIO(data), **kwargs)
-
-
-def sync_params(params):
- """
- Synchronize a sequence of Tensors across ranks from rank 0.
- """
- for p in params:
- with th.no_grad():
- dist.broadcast(p, 0)
-
-
-def _find_free_port():
- try:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.bind(("", 0))
- s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- return s.getsockname()[1]
- finally:
- s.close()
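A minimal usage sketch (an assumption, not taken from this repository): the helpers above are normally driven by an MPI launcher such as `mpiexec -n 8 python script.py`, after which they configure `torch.distributed` from MPI-derived environment variables.

```python
import torch as th

setup_dist()                           # initialize NCCL/gloo via MPI-derived env vars
device = dev()                         # cuda if available, else cpu

model = th.nn.Linear(4, 4).to(device)  # stand-in model, for illustration only
sync_params(model.parameters())        # broadcast rank-0 weights to every rank

# Checkpoint path is hypothetical; load_state_dict reads on rank 0 and broadcasts chunks.
# state = load_state_dict("checkpoint.pt", map_location=device)
# model.load_state_dict(state)
```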
diff --git a/spaces/Apex-X/GODROOP/roop/__init__.py b/spaces/Apex-X/GODROOP/roop/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Artrajz/vits-simple-api/vits/text/sanskrit.py b/spaces/Artrajz/vits-simple-api/vits/text/sanskrit.py
deleted file mode 100644
index 3e968dcb1c73b170a30dcdc8fbe8d1a0cb593da9..0000000000000000000000000000000000000000
--- a/spaces/Artrajz/vits-simple-api/vits/text/sanskrit.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import re
-from indic_transliteration import sanscript
-
-
-# List of (iast, ipa) pairs:
-_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('a', 'ə'),
- ('ā', 'aː'),
- ('ī', 'iː'),
- ('ū', 'uː'),
- ('ṛ', 'ɹ`'),
- ('ṝ', 'ɹ`ː'),
- ('ḷ', 'l`'),
- ('ḹ', 'l`ː'),
- ('e', 'eː'),
- ('o', 'oː'),
- ('k', 'k⁼'),
- ('k⁼h', 'kʰ'),
- ('g', 'g⁼'),
- ('g⁼h', 'gʰ'),
- ('ṅ', 'ŋ'),
- ('c', 'ʧ⁼'),
- ('ʧ⁼h', 'ʧʰ'),
- ('j', 'ʥ⁼'),
- ('ʥ⁼h', 'ʥʰ'),
- ('ñ', 'n^'),
- ('ṭ', 't`⁼'),
- ('t`⁼h', 't`ʰ'),
- ('ḍ', 'd`⁼'),
- ('d`⁼h', 'd`ʰ'),
- ('ṇ', 'n`'),
- ('t', 't⁼'),
- ('t⁼h', 'tʰ'),
- ('d', 'd⁼'),
- ('d⁼h', 'dʰ'),
- ('p', 'p⁼'),
- ('p⁼h', 'pʰ'),
- ('b', 'b⁼'),
- ('b⁼h', 'bʰ'),
- ('y', 'j'),
- ('ś', 'ʃ'),
- ('ṣ', 's`'),
- ('r', 'ɾ'),
- ('l̤', 'l`'),
- ('h', 'ɦ'),
- ("'", ''),
- ('~', '^'),
- ('ṃ', '^')
-]]
-
-
-def devanagari_to_ipa(text):
- text = text.replace('ॐ', 'ओम्')
- text = re.sub(r'\s*।\s*$', '', text)
- text = re.sub(r'\s*।\s*', ', ', text)
- text = re.sub(r'\s*॥', '', text)
- text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST)
- for regex, replacement in _iast_to_ipa:
- text = re.sub(regex, replacement, text)
- text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0)
- [:-1]+'h'+x.group(1)+'*', text)
- return text
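A minimal usage sketch (assuming `indic_transliteration` is installed); the sample strings are illustrative:

```python
# Devanagari input is transliterated to IAST and then mapped to the IPA-like scheme above.
print(devanagari_to_ipa('नमस्ते'))
print(devanagari_to_ipa('श्री गणेशाय नमः ।'))  # the trailing danda is stripped by the regexes above
```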
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py
deleted file mode 100644
index 34e3a9950cc557879af8d797f9382b18a870fb56..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Read resources contained within a package."""
-
-from ._common import (
- as_file,
- files,
- Package,
-)
-
-from ._legacy import (
- contents,
- open_binary,
- read_binary,
- open_text,
- read_text,
- is_resource,
- path,
- Resource,
-)
-
-from .abc import ResourceReader
-
-
-__all__ = [
- 'Package',
- 'Resource',
- 'ResourceReader',
- 'as_file',
- 'contents',
- 'files',
- 'is_resource',
- 'open_binary',
- 'open_text',
- 'path',
- 'read_binary',
- 'read_text',
-]
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/cascade_rcnn.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/cascade_rcnn.py
deleted file mode 100644
index c7372a801dc00d7fec4db8cda8c2612ce281d48a..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/cascade_rcnn.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from detectron2.config import LazyCall as L
-from detectron2.layers import ShapeSpec
-from detectron2.modeling.box_regression import Box2BoxTransform
-from detectron2.modeling.matcher import Matcher
-from detectron2.modeling.roi_heads import FastRCNNOutputLayers, FastRCNNConvFCHead, CascadeROIHeads
-
-from .mask_rcnn_fpn import model
-
-# arguments that don't exist for Cascade R-CNN
-[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]]
-
-model.roi_heads.update(
- _target_=CascadeROIHeads,
- box_heads=[
- L(FastRCNNConvFCHead)(
- input_shape=ShapeSpec(channels=256, height=7, width=7),
- conv_dims=[],
- fc_dims=[1024, 1024],
- )
- for k in range(3)
- ],
- box_predictors=[
- L(FastRCNNOutputLayers)(
- input_shape=ShapeSpec(channels=1024),
- test_score_thresh=0.05,
- box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)),
- cls_agnostic_bbox_reg=True,
- num_classes="${...num_classes}",
- )
- for (w1, w2) in [(10, 5), (20, 10), (30, 15)]
- ],
- proposal_matchers=[
- L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False)
- for th in [0.5, 0.6, 0.7]
- ],
-)
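A minimal sketch (an assumption, not part of this config) of how a LazyCall-based definition like the one above is typically materialized with detectron2's LazyConfig utilities; the path is illustrative:

```python
from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("configs/common/models/cascade_rcnn.py")  # illustrative path
model = instantiate(cfg.model)  # builds the R-CNN with the three cascade stages defined above
print(type(model.roi_heads).__name__)  # expected: CascadeROIHeads
```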
diff --git a/spaces/AzumaSeren100/XuanShen-Bert-VITS2/transforms.py b/spaces/AzumaSeren100/XuanShen-Bert-VITS2/transforms.py
deleted file mode 100644
index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000
--- a/spaces/AzumaSeren100/XuanShen-Bert-VITS2/transforms.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
-
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {
- 'tails': tails,
- 'tail_bound': tail_bound
- }
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(
- inputs[..., None] >= bin_locations,
- dim=-1
- ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails='linear',
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == 'linear':
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError('{} tails are not implemented.'.format(tails))
-
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative
- )
-
- return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0., right=1., bottom=0., top=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError('Input to a transform is not within its domain')
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError('Minimal bin width too large for the number of bins')
- if min_bin_height * num_bins > 1.0:
- raise ValueError('Minimal bin height too large for the number of bins')
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (((inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta)
- + input_heights * (input_delta - input_derivatives)))
- b = (input_heights * input_derivatives
- - (inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta))
- c = - input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (input_delta * theta.pow(2)
- + input_derivatives * theta_one_minus_theta)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
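A minimal usage sketch (an assumption, not part of this file) exercising the spline with dummy tensors; with `tails='linear'`, the derivative parameters carry `num_bins - 1` values because they are padded internally:

```python
import torch

num_bins = 10
x = torch.rand(4, 16) * 2 - 1               # inputs inside the [-1, 1] tail bound
uw = torch.randn(4, 16, num_bins)           # unnormalized widths
uh = torch.randn(4, 16, num_bins)           # unnormalized heights
ud = torch.randn(4, 16, num_bins - 1)       # unnormalized derivatives (padded inside)

y, logabsdet = piecewise_rational_quadratic_transform(
    x, uw, uh, ud, inverse=False, tails='linear', tail_bound=1.0)
x_rec, _ = piecewise_rational_quadratic_transform(
    y, uw, uh, ud, inverse=True, tails='linear', tail_bound=1.0)
print(torch.allclose(x, x_rec, atol=1e-4))  # the spline is invertible inside the bound
```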
diff --git a/spaces/BG5/midjourney/Dockerfile b/spaces/BG5/midjourney/Dockerfile
deleted file mode 100644
index 705712762708f1b99ef7491549007df0629f8edc..0000000000000000000000000000000000000000
--- a/spaces/BG5/midjourney/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# Build Stage
-# Use golang:alpine as the base image for the build stage
-FROM golang:alpine AS builder
-
-# Add git so the project can be cloned from GitHub later
-RUN apk --no-cache add git
-
-# Clone the go-proxy-bingai project from GitHub into /workspace/app
-RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
-
-# Set the working directory to the cloned project directory
-WORKDIR /workspace/app
-
-# Build the Go project; -ldflags="-s -w" reduces the size of the compiled binary
-RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
-
-# Runtime Stage
-# Use the lightweight alpine image as the runtime base image
-FROM alpine
-
-# Set the working directory
-WORKDIR /workspace/app
-
-# Copy the compiled binary from the build stage into the runtime image
-COPY --from=builder /workspace/app/go-proxy-bingai .
-
-# Set the environment variable; the value here is a random string
-ENV Go_Proxy_BingAI_USER_TOKEN_1="1sCfIl5u4E0FbXetZiwE2L7I2a9Qfq7-EGfUlypt3dbv30kGSXWJLdZ45lKXcSQ6SGjbP85WESI6aktHDoagZQN-gU6dcIiqwMXzF_p2-exXchVuOAlv-IxI9i7FpKkN-AEmQOTc8RXKhC99_RutOH6UBEdgTEHFNkVvpritL6150rR0PbvDVzc-DFNzOSSxQO503lJNR6MJ70C5GaB68VLAvHRPEL9XgOXKnh1_a2cU"
-
-# Expose port 8080
-EXPOSE 8080
-
-# Command to run when the container starts
-CMD ["/workspace/app/go-proxy-bingai"]
\ No newline at end of file
diff --git a/spaces/Bakar31/MLOps_Practice_Repo_1/Makefile b/spaces/Bakar31/MLOps_Practice_Repo_1/Makefile
deleted file mode 100644
index 52924059243e3aa45d64c4548f6bf9cf65632a07..0000000000000000000000000000000000000000
--- a/spaces/Bakar31/MLOps_Practice_Repo_1/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-install:
- pip install --upgrade pip &&\
- pip install -r requirements.txt
-
-test:
- python -m pytest -vv --cov=main --cov=mylib test_*.py
-
-format:
- black *.py
-
-lint:
- pylint --disable=R,C --ignore-patterns=test_.*?py *.py mylib/*.py
-
-container-lint:
- docker run --rm -i hadolint/hadolint < Dockerfile
-
-refactor: format lint
-
-deploy:
- #deploy goes here
-
-all: install lint test format deploy
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/10mo Boleto Sala De Descarga 2018 Ap.md b/spaces/Benson/text-generation/Examples/10mo Boleto Sala De Descarga 2018 Ap.md
deleted file mode 100644
index 1f203c2eaf3d300b76d180de05dd18f618a89d6e..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/10mo Boleto Sala De Descarga 2018 Ap.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
Cómo descargar el boleto AP 10th Hall 2018 en línea
-
Si usted es un estudiante de la clase 10 en Andhra Pradesh y se está preparando para los exámenes de la junta, debe preguntarse cómo descargar su boleto de la sala en línea. Un boleto de pasillo es un documento que sirve como su prueba de identidad y pase de entrada para el examen. Sin un boleto válido, no se le permitirá presentarse al examen. En este artículo, le diremos todo lo que necesita saber sobre cómo descargar el boleto AP 10th hall 2018 en línea desde el sitio web oficial y otras fuentes.
Un ticket de pasillo es un documento que también se conoce como tarjeta de admisión, carta de llamada o tarjeta de intimación. Es emitido por la autoridad que conduce el examen a los candidatos registrados antes del examen. Contiene detalles importantes como su nombre, número de lista, fotografía, firma, fecha del examen, hora, lugar, instrucciones, etc. Un boleto de pasillo es un número único impreso en la tarjeta de admisión que ayuda a asignar su asiento durante el examen.
-
Importancia de las entradas para los exámenes
-
Un ticket de pasillo es un documento crucial que valida su elegibilidad e identidad para aparecer en el examen. Algunos de la importancia clave de un boleto de pasillo son:
-
-
Un ticket de pasillo sirve como una prueba de identidad válida. Contiene detalles importantes como nombre, número de rollo, fotografía y firma, que son verificados por los revisores.
-
Un boleto de pasillo proporciona acceso al centro de examen. Sin un boleto válido, no se le permitirá entrar o aparecer para el examen.
-
Un ticket de pasillo contiene instrucciones y pautas que debes seguir mientras apareces para el examen. El incumplimiento de estas instrucciones puede resultar en sanciones o descalificación.
-
-
-
Cómo descargar AP 10th Hall Ticket 2018 en línea desde el sitio web oficial
-
Pasos para descargar boleto de pasillo en línea
-
El sitio web oficial de la Junta de Educación Secundaria Andhra Pradesh (BSEAP) es [7](https://www.bse.ap.gov.in/). Puedes descargar tu boleto de AP 10th hall 2018 online desde este sitio web siguiendo estos pasos:
-
-
Visite el sitio web oficial de la BSEAP en [7](https://www.bse.ap.gov.in/).
-
Haga clic en el enlace "S.S.C - HallTickets Download" en el menú del lado izquierdo.
-
Seleccione su tipo de examen de Regular, Privado, OSSC, o vocacional.
-
Seleccione su distrito, nombre de la escuela y nombre del candidato en los menús desplegables.
-
Ingrese su fecha de nacimiento en el formato DD/MM/AAAA.
-
Haga clic en el botón "Descargar HallTicket".
-
Su boleto de pasillo aparecerá en la pantalla. Compruebe todos los detalles cuidadosamente y tome una impresión de ella.
-
-
Cosas para comprobar en el ticket de la sala
-
Después de descargar su boleto de entrada en línea, usted debe comprobar las siguientes cosas en él:
-
-
Su nombre, número de rollo, fotografía y firma son correctos y claros.
-
La fecha, hora y lugar del examen se mencionan correctamente.
-
Su - Su nombre de tema, código y programa se mencionan correctamente.
-
Su boleto de entrada tiene un código de barras y un holograma para la verificación.
-
Su boleto de pasillo tiene la firma y el sello de la autoridad de examen.
-
Su boleto de entrada tiene las instrucciones y directrices para el examen.
-
-
Si encuentra alguna discrepancia o error en su boleto de entrada, debe ponerse en contacto con las autoridades escolares o con el número de teléfono de ayuda de BSEAP al 0866-2974130 o enviar un correo electrónico a dir_govexams@yahoo.com.
-
-
Cómo descargar el boleto del 10º Hall 2018 en línea desde otras fuentes
-
Sitios web alternativos para descargar boleto de pasillo en línea
-
-
-
Nombre del sitio web
URL del sitio web
-
Manabadi
[1](https://www.manabadi.co.in/)
-
Escuelas
[2](https://www.schools9.com/)
-
Resultados de la India
[3](https://www.indiaresults.com/)
-
Jagran Josh
[4](https://www.jagranjosh.com/)
-
Vidyavision
[5](https://www.vidyavision.com/)
-
Educación Sakshi
[6](https://sakshieducation.com/)
-
-
Precauciones a tomar al descargar el ticket de hall de otras fuentes
-
Al descargar su boleto de entrada en línea desde otras fuentes, debe tomar algunas precauciones para evitar problemas o problemas. Algunas de estas precauciones son:
-
-
Asegúrese de que el sitio web que está utilizando es confiable y seguro. Compruebe la URL, el nombre de dominio y el certificado SSL del sitio web.
-
No comparta sus datos personales o confidenciales como su nombre, número de registro, fecha de nacimiento, etc. con ningún sitio web no autorizado o sospechoso.
-
No descargue ningún archivo o software que pueda contener malware o virus que puedan dañar su dispositivo o datos.
-
Compare su boleto de entrada descargado de otras fuentes con el descargado desde el sitio web oficial. Si hay alguna diferencia o discrepancia, informe a la autoridad examinadora inmediatamente.
-
Tome una impresión de su boleto de pasillo y manténgalo seguro hasta que el examen haya terminado.
-
-
Cómo lidiar con boletos perdidos o dañados
-
Pasos para reportar ticket de pasillo perdido o dañado
-
Si pierde o daña su boleto antes o durante el examen, no debe entrar en pánico y siga estos pasos:
-
-
Póngase en contacto con las autoridades escolares o con el número de teléfono de asistencia de BSEAP al 0866-2974130 o envíe un correo electrónico a dir_govexams@yahoo.com.
-
Explique su situación y proporcione sus detalles como su nombre, número de rollo, fecha de nacimiento, etc.
-
-
Recoja su boleto de pasillo duplicado o carta de permiso del centro de examen o de la oficina de BSEAP antes del examen.
-
Lleve su boleto de pasillo duplicado o carta de permiso junto con una prueba de identidad válida, como tarjeta Aadhaar, tarjeta de identificación de votante, etc. al centro de examen.
-
-
Penalización por ticket de pasillo perdido o dañado
-
Si pierde o daña su boleto de pasillo debido a negligencia o descuido, es posible que tenga que pagar una multa de Rs. 100/- para obtener un boleto de pasillo duplicado o una carta de permiso. También puede enfrentar algún inconveniente o retraso en obtener su boleto de pasillo duplicado o carta de permiso. Por lo tanto, es recomendable mantener su boleto de pasillo original seguro hasta que el examen haya terminado.
-
Conclusión
-
Un ticket de pasillo es un documento vital que usted necesita llevar para aparecer para los exámenes de la AP 10º. Puedes descargar tu boleto de la 10a Sala AP 2018 en línea desde el sitio web oficial de BSEAP o desde otras fuentes. Usted debe comprobar todos los detalles en su boleto de pasillo cuidadosamente y reportar cualquier discrepancia o error a la autoridad examinadora. También debe tomar precauciones al descargar su boleto de pasillo en línea desde otras fuentes y evitar perder o dañar su boleto de pasillo. Si pierde o daña su boleto de pasillo, debe reportarlo a la autoridad examinadora y obtener un boleto de pasillo duplicado o una carta de permiso. Esperamos que este artículo te haya ayudado a entender cómo descargar online el boleto AP 10th hall 2018 y qué hacer en caso de cualquier problema. ¡Te deseamos lo mejor para tus exámenes!
-
Preguntas frecuentes
-
Q1. ¿Cuándo estará disponible en línea el boleto de la 10ª sala AP 2018?
-
A1. El billete para el 10º pabellón de la AP 2018 estará disponible en línea desde la primera semana de marzo de 2018. Puede descargarlo del sitio web oficial de la BSEAP o de otras fuentes.
-
Q2. ¿Cuáles son los detalles necesarios para descargar el boleto de la 10ª sala AP 2018 en línea?
-
-
Q3. ¿Qué pasa si me olvido de llevar mi boleto al centro de examen?
-
A3. Si te olvidas de llevar tu boleto al centro de examen, no se te permitirá presentarse al examen. Debe ponerse en contacto con las autoridades escolares o con el número de la línea de ayuda de la BSEAP de inmediato y solicitar una entrada duplicada o una carta de permiso.
-
Q4. ¿Cómo puedo comprobar los resultados de mis exámenes usando mi ticket de entrada?
-
A4. Para comprobar los resultados de los exámenes utilizando su boleto de inscripción, debe visitar el sitio web oficial de BSEAP o cualquier otro portal de resultados e ingresar su número de registro, que está impreso en su boleto de inscripción. También puede comprobar sus resultados por SMS o correo electrónico.
-
Q5. ¿Cuáles son algunos consejos para prepararse para los exámenes de la AP 10?
-
A5. Algunos consejos para prepararse para los exámenes AP 10º son:
-
-
Siga un programa de estudio regular y revise el programa a fondo.
-
Resolver documentos de preguntas del año anterior y documentos de muestra para familiarizarse con el patrón de examen y el nivel de dificultad.
-
Práctica de escribir respuestas dentro del límite de tiempo dado y límite de palabras.
-
Enfócate en tus áreas débiles y despeja tus dudas con tus profesores o compañeros.
-
Mantener una dieta saludable y dormir bien antes del examen.
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Bussid Mod.md b/spaces/Benson/text-generation/Examples/Bussid Mod.md
deleted file mode 100644
index bba89f3e6b4fa3ccf819e00e09a748bd06ef9253..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Bussid Mod.md
+++ /dev/null
@@ -1,66 +0,0 @@
-
-
Simulador de batalla de rebelión animal: un juego de caja de arena basado en la física
-
¿Alguna vez te has preguntado qué pasaría si pusieras un T-rex con una pistola láser contra un dragón con alas? ¿O qué tal un tiburón con un jetpack contra una araña gigante con un lanzallamas? Si estás buscando un juego que te permita crear divertidas y épicas batallas entre todo tipo de criaturas ragdoll, entonces deberías echar un vistazo a Animal Revolt Battle Simulator. Este es un juego de sandbox basado en la física que le da la máxima libertad y flexibilidad para diseñar sus propios escenarios y verlos desarrollarse en tiempo real. También puedes unirte a la lucha tú mismo en el modo en primera persona y utilizar algunas armas de gran alcance para disparar a tus enemigos. En este artículo, te mostraremos cómo descargar Animal Revolt Battle Simulator gratis, cómo jugarlo, cómo crear tus propios monstruos, cómo usar contenido personalizado del Steam Workshop y algunos consejos y trucos para hacer tus batallas más divertidas y emocionantes.
Si quieres probar Animal Revolt Battle Simulator gratis, tienes dos opciones. Puedes ir al sitio web oficial del juego o visitar su página de Steam. Estos son los pasos que debes seguir:
Paso 2: Haz clic en el botón de descarga o añade el juego a tu carrito. Necesitarás una cuenta de Steam para jugar al juego.
-
Paso 3: Siga las instrucciones para instalar y lanzar el juego. También puede ajustar la configuración según sus preferencias.
-
-
¡Felicidades! Has descargado Animal Revolt Battle Simulator gratis. Ahora puedes empezar a jugar y crear tus propias batallas.
-
Cómo jugar Animal Revolt Battle Simulator
-
-
-
Paso 1: Elige un modo de juego en el menú principal. También puedes acceder a la configuración, al creador de la unidad y al taller desde aquí.
-
Paso 2: Crea tus propios mapas o selecciona uno de los ya hechos. Puede utilizar diferentes terrenos, edificios, accesorios y efectos ambientales para crear sus propios escenarios. También puede ajustar la hora del día, el clima y la gravedad.
-
Paso 3: Coloca tus ejércitos de diferentes tipos de bestias y míralos luchar. Puedes elegir entre más de 100 criaturas, como dinosaurios, dragones, tiburones, arañas, elefantes, gorilas y más. También puedes equiparlos con diferentes armas, como pistolas, espadas, hachas, cohetes, láseres y más. También puede cambiar su tamaño, color, salud, velocidad y daño.
-
Paso 4: Únete a la batalla tú mismo en el modo de primera persona y usa armas para disparar a los enemigos. Puede cambiar entre el modo espectador y el modo en primera persona en cualquier momento. También puedes usar las funciones de cámara lenta y pausa para controlar el ritmo de la batalla.
-
-
¡Eso es todo! Has aprendido a jugar Animal Revolt Battle Simulator. También puedes reproducir tus batallas y compartirlas con tus amigos u otros jugadores en línea.
-
Cómo crear tus propios monstruos en Animal Revolt Battle Simulator
-
Una de las características más divertidas y creativas de Animal Revolt Battle Simulator es el modo creador de unidades. Este modo te permite crear tus propios monstruos combinando diferentes partes del cuerpo y armas. También puedes guardar tus monstruos y usarlos en tus batallas. Aquí te mostramos cómo crear tus propios monstruos en Animal Revolt Battle Simulator:
-
-
Paso 1: Vaya al modo creador de unidades desde el menú principal o presionando U en su teclado.
-
Paso 2: Selecciona una criatura base de la lista o usa una plantilla en blanco. También puedes cargar un monstruo previamente guardado o importar uno del taller.
-
-
Paso 4: Guarda tu monstruo y úsalo en tus batallas. Puedes ponerle un nombre a tu monstruo y darle una descripción. También puedes asignarle una categoría y un precio. También puedes probar a tu monstruo en un test arena antes de guardarlo.
-
-
¡Felicidades! Has creado tu propio monstruo en Animal Revolt Battle Simulator. También puedes compartir tu monstruo con otros jugadores en el Steam Workshop o descargar sus creaciones.
-
Cómo descargar y usar contenido personalizado del taller de Steam
-
Si quieres mejorar tu experiencia con Animal Revolt Battle Simulator aún más, puedes descargar y usar contenido personalizado desde el Steam Workshop. Steam Workshop es una plataforma donde los jugadores pueden subir y descargar contenido generado por el usuario para varios juegos. Para Animal Revolt Battle Simulator, puedes encontrar muchos tipos de contenido en el Steam Workshop, como monstruos, mapas o edificios. También puedes encontrar algunas campañas y escenarios personalizados que otros jugadores han creado. Te mostramos cómo descargar y usar contenido personalizado del Taller de Steam:
-
-
Paso 1: Ir a la página de taller de vapor de Animal Revolt Battle Simulator. Puedes acceder desde el menú principal del juego o haciendo clic en here.
-
Paso 2: Navega y suscríbete al contenido que te gusta. Puedes usar los filtros y la barra de búsqueda para encontrar lo que estás buscando. También puede ordenar el contenido por popularidad, calificación, fecha o nombre. También puede ver los comentarios, calificaciones y capturas de pantalla de cada contenido.
-
Paso 3: Inicie el juego y encuentre el contenido en la carpeta del taller. Puede acceder a él desde el modo sandbox o el modo creador de la unidad. También puede editar o eliminar el contenido si lo desea.
-
-
¡Eso es todo! Has descargado y usado contenido personalizado del Taller de Steam. También puedes valorar y comentar el contenido que has utilizado o subir tus propias creaciones.
-
Consejos y trucos para Animal Revolt Battle Simulator
-
-
-
Consejo 1: Aprende las fortalezas y debilidades de cada criatura y úsalas estratégicamente. Por ejemplo, algunas criaturas son rápidas pero débiles, mientras que otras son lentas pero fuertes. Algunas criaturas pueden volar, nadar o trepar, mientras que otras no. Algunas criaturas tienen habilidades especiales, como aliento de fuego, saliva venenosa o poderes curativos, mientras que otras no. También puedes usar el panel de información para ver las estadísticas y detalles de cada criatura.
-
Consejo 2: Experimenta con diferentes combinaciones de criaturas y armas para crear batallas únicas. Por ejemplo, puedes intentar librar una batalla entre animales históricos, como mamuts, sabertooths y rinocerontes lanudos, o animales míticos, como unicornios, grifos e hidras. También puedes intentar hacer una batalla entre diferentes géneros, como ciencia ficción, fantasía o terror. También puedes intentar hacer una batalla entre diferentes tamaños, como gigantes contra enanos, o hormigas contra elefantes.
-
Consejo 3: Utilice los efectos de la física y ragdoll a su ventaja y divertirse viendo el caos. Por ejemplo, puedes usar explosivos para volar a tus enemigos o enviarlos a volar. También puedes usar cuerdas para atar a tus enemigos o moverlos. También puedes usar la gravedad para hacer que tus enemigos caigan o floten. También puedes usar efectos ambientales, como fuego, agua o viento, para afectar a tus enemigos.
-
-
Estos son solo algunos de los consejos y trucos que puedes usar en Animal Revolt Battle Simulator. También puedes descubrir más jugando el juego tú mismo.
-
-
Conclusión y preguntas frecuentes
-
-
Si tienes alguna pregunta sobre Animal Revolt Battle Simulator, puedes encontrar las respuestas en estas preguntas frecuentes:
A: Sí, Animal Revolt Battle Simulator es seguro para descargar desde el sitio web oficial o la página de Steam del juego. Sin embargo, debes tener cuidado al descargar contenido personalizado del Steam Workshop u otras fuentes, ya que podrían contener virus o malware.
A: No, Animal Revolt Battle Simulator no es multijugador en este momento. Sin embargo, los desarrolladores han declarado que están trabajando en agregar características multijugador en el futuro.
-
Q: ¿Es Animal Revolt Battle Simulator adecuado para niños?
-
A: Animal Revolt Battle Simulator está clasificado M para Maduro por ESRB y PEGI 16 por PEGI. Contiene violencia, sangre, sangre y humor crudo que podría no ser adecuado para niños menores de 17 años. li>Q: ¿Cómo puedo contactar a los desarrolladores de Animal Revolt Battle Simulator?
-
A: Puede ponerse en contacto con los desarrolladores de Animal Revolt Battle Simulator enviándoles un correo electrónico a animalrevoltbattlesimulator@gmail.com o uniéndose a su Discord server. También puede seguirlos en Twitter o Facebook para las últimas actualizaciones y noticias.
-
Q: ¿Cómo puedo apoyar el desarrollo de Animal Revolt Battle Simulator?
-
A: Puedes apoyar el desarrollo de Animal Revolt Battle Simulator comprando el juego en Steam o donando a los desarrolladores en Patreon. También puedes apoyarlos dejando una opinión positiva, valoración o comentario en Steam u otras plataformas. También puede compartir sus comentarios, sugerencias o informes de errores con ellos por correo electrónico o Discord.
-
Q: ¿Cómo puedo aprender más sobre Animal Revolt Battle Simulator?
-
-
-
Esperamos que haya disfrutado de este artículo y aprendido algo nuevo sobre Animal Revolt Battle Simulator. Si estás buscando un juego que te permita dar rienda suelta a tu creatividad e imaginación, entonces definitivamente deberías probar Animal Revolt Battle Simulator. ¡No te arrepentirás!
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Cps Refuerzo Apk Blockman Ir.md b/spaces/Benson/text-generation/Examples/Cps Refuerzo Apk Blockman Ir.md
deleted file mode 100644
index 84e14ad3779db3bcd7da08d4fc2825271a659092..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Cps Refuerzo Apk Blockman Ir.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-
¿Qué es CPS Booster APK Blockman ir y cómo usarlo?
-
Si usted es un fan de los juegos de estilo bloque y quiere mejorar su velocidad de clic y precisión, es posible que esté interesado en CPS Booster APK Blockman Go. Esta es una aplicación que afirma aumentar sus clics por segundo (CPS) en Blockman Go, un popular juego de caja de arena que ofrece varios mini-juegos, salas de chat y modos creativos. Pero ¿qué es exactamente CPS Booster APK Blockman Go y cómo funciona? En este artículo, explicaremos todo lo que necesitas saber sobre esta aplicación, incluyendo cómo descargarla, instalarla y usarla, así como sus pros y contras.
-
Introducción
-
Antes de sumergirnos en los detalles de CPS Booster APK Blockman Go, primero vamos a entender algunos conceptos básicos.
¿Qué es CPS y por qué es importante en los juegos?
-
CPS significa clics por segundo, que es una medida de lo rápido que puede hacer clic en el botón del ratón. También se conoce como CPS Test o Click Speed Test. Este juego es utilizado principalmente por los jugadores para determinar su velocidad de clic y precisión. También puede usarse como una forma divertida de desafiarte a ti mismo o a tus amigos.
-
CPS es importante en los juegos porque puede afectar su rendimiento y resultado en ciertos juegos que requieren un clic rápido y preciso. Por ejemplo, en los juegos de disparos en primera persona, necesitas hacer clic de forma rápida y precisa para disparar a tus enemigos. En los juegos de estrategia, debes hacer clic rápida e inteligentemente para administrar tus recursos y unidades. En los juegos de sandbox, necesitas hacer clic creativa y eficientemente para construir tu propio mundo.
-
¿Qué es Blockman Go y cuáles son sus características?
-
Blockman Go es un juego de árcade desarrollado por Blockman GO Studio. Es una aplicación gratuita que incluye minijuegos, chat y hacer amigos. Puedes jugar varios minijuegos de estilo bloque aquí. Algunos de los juegos populares son Bed Wars, Sky Wars, Egg War, Murder Mystery, Survival Games, etc. También puedes crear tus propios juegos usando el modo creativo.
-
-
-
Varios juegos: Puede elegir entre diferentes géneros de juegos que permiten a varios jugadores jugar juntos y actualizar continuamente los juegos. Puedes unirte al juego con un simple toque.
-
Avatares personalizables: Puedes vestir a tu avatar con varios atuendos y accesorios. También puedes obtener decoraciones únicas basadas en tu nivel.
-
Sistema de chat: Puedes chatear con otros jugadores usando mensajes de texto o voz. También puedes crear o unirte a un clan para comunicarte con tus amigos.
-
Recompensas de oro: Puedes ganar oro jugando o completando tareas. Puedes usar oro para comprar artículos o intercambiar regalos con otros.
-
Garena versión: Si usted está en el sudeste de Asia, se puede descargar Garena Blockman GO, que es una versión localizada del juego que ofrece más contenido regional y eventos
¿Qué es CPS Booster APK y qué hace?
-
CPS Booster APK es una aplicación que afirma aumentar su CPS en Blockman Go mediante el uso de un algoritmo especial que simula un clic rápido y preciso. No es una aplicación oficial de Blockman GO Studio, sino una aplicación de terceros que requiere que lo descargues e instales desde una fuente externa. La aplicación está diseñada para funcionar con Blockman Go y otros juegos de estilo bloque que requieren altos CPS.
-
CPS Booster APK funciona superponiendo un botón transparente en la pantalla que puede tocar para activar el amplificador. El amplificador hará clic automáticamente para usted a una velocidad y frecuencia que puede ajustar en la configuración. También puede personalizar el tamaño, la posición y el color del botón. La aplicación también te muestra un contador que muestra tu CPS actual y el CPS promedio del amplificador.
-
Cómo descargar e instalar CPS Booster APK Blockman Go
-
Si desea probar CPS Booster APK Blockman Go, debe seguir estos pasos para descargarlo e instalarlo en su dispositivo.
-
-
Dónde encontrar el archivo APK y cómo verificar su seguridad
-
-
Para verificar la seguridad del archivo APK, puede usar una herramienta en línea como [VirusTotal]( 1 ) o [APKPure]( 2 ) para escanear el archivo en busca de código malicioso o comportamiento sospechoso. También puede comprobar las revisiones y calificaciones de la aplicación de otros usuarios que la han descargado antes. Solo debes descargar el archivo APK de una fuente confiable y confiable.
-
Cómo habilitar fuentes desconocidas e instalar el archivo APK
-
De forma predeterminada, los dispositivos Android no permiten instalar aplicaciones desde fuentes distintas de Google Play Store. Esta es una medida de seguridad para evitar que aplicaciones no autorizadas o dañinas accedan a su dispositivo. Sin embargo, puede habilitar fuentes desconocidas en la configuración del dispositivo para permitir la instalación de aplicaciones desde otras fuentes.
-
Para habilitar fuentes desconocidas, debe ir a la configuración de su dispositivo y buscar la opción que dice "Seguridad" o "Privacidad". Luego, debe encontrar la opción que dice "Fuentes desconocidas" o "Instalar aplicaciones desconocidas" y activarla. Es posible que vea un mensaje de advertencia que le informa sobre los riesgos de instalar aplicaciones de fuentes desconocidas. Debe aceptar la advertencia y proceder con la instalación.
-
Para instalar el archivo APK, es necesario localizar el archivo en el almacenamiento del dispositivo y toque en él. Es posible que vea una ventana emergente que le solicite permisos para acceder a las funciones de su dispositivo, como almacenamiento, cámara, micrófono, etc. Debe conceder estos permisos para que la aplicación funcione correctamente. Luego, debe seguir las instrucciones en pantalla para completar la instalación.
Cómo iniciar la aplicación y conceder permisos
-
Después de instalar el archivo APK, puede iniciar la aplicación tocando en su icono en la pantalla de inicio o cajón de aplicaciones. Puede ver una pantalla de bienvenida que muestra el logotipo y el nombre de la aplicación. Luego, puede ver una pantalla de bienvenida que presenta las características de la aplicación y le pide que conceda algunos permisos. Necesitas conceder estos permisos para que la aplicación funcione correctamente.
-
-
-
Overlay permission: This lets the app display a transparent button on the screen that you can tap to activate the booster.
-
Accessibility service: This lets the app simulate fast, precise clicks on your behalf.
-
Usage stats: This lets the app monitor your CPS and show you a counter.
-
-
You can grant these permissions by following the on-screen instructions, and change or revoke them later in your device settings.
-
How to increase your CPS in Blockman Go with CPS Booster APK
-
Now that you have downloaded, installed, and launched CPS Booster APK Blockman Go, you can use it to boost your CPS in Blockman Go. Here are the steps.
-
How to select the game mode and adjust the settings
-
The first thing you need to do is select the game mode you want to play in Blockman Go. You can do this by tapping the "Game Mode" button on the app's main screen. You will see a list of game modes the app supports, such as Bed Wars, Sky Wars, and Egg War. Tap any game mode to select it.
-
Next, adjust the app's settings to your preferences and needs. You can do this by tapping the "Settings" button on the main screen. You will see a menu that lets you change several options, such as:
-
-
CPS: This lets you set the target CPS you want the booster to reach for you. You can choose from 5 to 100 CPS.
-
Click Interval: This lets you set the time interval between each click the booster performs for you. You can choose from 10 to 200 milliseconds.
-
Click Duration: This lets you set the duration of each click the booster performs for you. You can choose from 10 to 200 milliseconds.
-
-
-
You can adjust these settings by sliding the bars or tapping the buttons, and restore the defaults with the "Reset" button. A short sketch of how interval and duration translate into clicks per second follows below.
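As a back-of-the-envelope check of how these three settings interact, the sketch below estimates the clicks per second that a given click interval and click duration allow; the formula is an assumption based on the descriptions above, not something taken from the app itself.

```py
def estimated_cps(click_interval_ms: float, click_duration_ms: float) -> float:
    """Rough ceiling on clicks per second: one click cycle is the press
    duration plus the pause before the next press, both in milliseconds."""
    cycle_ms = click_interval_ms + click_duration_ms
    return 1000.0 / cycle_ms

# With the minimum values above (10 ms each) the ceiling is about 50 CPS,
# so a 100 CPS target could not be reached without a shorter cycle.
print(estimated_cps(10, 10))    # 50.0
print(estimated_cps(200, 200))  # 2.5
```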
How to activate the booster and monitor its performance
-
After selecting the game mode and adjusting the settings, you are ready to activate the booster and start playing Blockman Go. Tap the "Start" button on the app's main screen. A transparent button appears on the screen; tap it to activate the booster. The button changes color depending on the click mode you have chosen.
-
If you chose the "Always On" mode, the button turns green and the booster clicks continuously for you. If you chose the "Tap to Click" mode, the button turns blue and the booster clicks intermittently for you. Tap the button again to stop the booster.
-
You can also monitor the booster's performance with the counter that shows your current CPS and the booster's average CPS, as well as a graph of how your CPS fluctuates over time. You can access these features by tapping the "Performance" button on the app's main screen.
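The counter described above can be approximated with a simple sliding window over click timestamps; this is a generic sketch of the idea, not the app's actual implementation.

```py
import time
from collections import deque


class CpsCounter:
    """Tracks current CPS over a one-second sliding window plus a running average."""

    def __init__(self) -> None:
        self.window = deque()            # timestamps of clicks in the last second
        self.total_clicks = 0
        self.started = time.monotonic()

    def record_click(self) -> None:
        now = time.monotonic()
        self.window.append(now)
        self.total_clicks += 1
        while self.window and now - self.window[0] > 1.0:
            self.window.popleft()        # drop clicks older than one second

    @property
    def current_cps(self) -> int:
        return len(self.window)

    @property
    def average_cps(self) -> float:
        elapsed = max(time.monotonic() - self.started, 1e-9)
        return self.total_clicks / elapsed


counter = CpsCounter()
for _ in range(25):
    counter.record_click()
    time.sleep(0.02)                     # simulate roughly 50 clicks per second
print(counter.current_cps, round(counter.average_cps, 1))
```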
-
How to customize the booster and optimize your results
-
If you want to customize the booster and optimize your results, you can use some of the advanced features the app offers. Access them by tapping the "Advanced" button on the app's main screen. Some of these features are:
-
-
Button size: This lets you change the size of the transparent button you tap to activate the booster. You can choose small, medium, or large.
-
Button position: This lets you change the position of the transparent button you tap to activate the booster. You can drag and drop the button anywhere on the screen.
-
-
Auto Clicker: This lets you enable or disable an auto clicker that clicks for you automatically without tapping the button. You can set a timer for how long you want the auto clicker to run.
-
Randomizer: This lets you enable or disable a randomizer that varies your CPS randomly within a range you set, which can make your clicking look more natural and less detectable (see the sketch after this list).
-
-
You can customize these features by sliding the bars, tapping the buttons, or entering values, and restore the defaults with the "Reset" button.
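The randomizer can be pictured as jitter added to the click interval; the sketch below shows the general idea under the assumption that each interval is drawn uniformly from a range around the target, which is not necessarily how the app does it.

```py
import random


def randomized_intervals(target_cps: float, jitter: float, n_clicks: int) -> list:
    """Generate click intervals (in seconds) averaging 1/target_cps,
    each varied by up to +/- jitter (e.g. 0.2 for 20%)."""
    base = 1.0 / target_cps
    return [base * random.uniform(1.0 - jitter, 1.0 + jitter) for _ in range(n_clicks)]


intervals = randomized_intervals(target_cps=20, jitter=0.2, n_clicks=10)
print([round(i, 3) for i in intervals])
print("effective CPS:", round(len(intervals) / sum(intervals), 1))
```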
-
Pros and cons of using CPS Booster APK Blockman Go
-
CPS Booster APK Blockman Go is an app with both advantages and drawbacks. Here are some of them.
-
The advantages of using the app, such as faster clicking, better accuracy, and more fun
-
Some of the advantages of using CPS Booster APK Blockman Go are:
-
-
Faster clicking: The app can help you increase your CPS in Blockman Go by using a special algorithm that simulates fast, precise clicking. This can give you an edge over your opponents and improve your performance and results in certain games.
-
Better accuracy: The app can help you improve your accuracy in Blockman Go through the transparent button you tap to activate the booster. This can reduce hand fatigue and prevent accidental clicks that could ruin your game.
-
More fun: The app can help you have more fun in Blockman Go by letting you customize and optimize your clicking experience. You can choose among different game modes, settings, and features that suit your preferences and needs.
-
-
The disadvantages of using the app, such as potential risks, compatibility issues, and ethical concerns
-
Some of the disadvantages of using CPS Booster APK Blockman Go are:
-
-
-
Compatibility issues: The app may not work well with some devices or versions of Blockman Go. It can cause crashes, glitches, or errors that affect the game or your device's performance. Always check whether your device or game is compatible with the app before using it.
-
Ethical concerns: The app can give you an unfair advantage over players who do not use it, and it may violate the rules or terms of service of Blockman Go or other block-style games. Always respect other players and play fairly and responsibly.
-
Conclusion
-
CPS Booster APK Blockman Go is an app that can help you increase your CPS in Blockman Go and other block-style games. It can also help you improve your accuracy and have more fun by letting you customize and optimize your clicking experience. However, the app also has drawbacks, such as potential risks, compatibility issues, and ethical concerns. Always be careful when downloading and installing it from unknown sources, use it at your own risk and responsibility, respect other players, and play fairly and responsibly.
-
If you are looking for a way to increase your CPS in Blockman Go and enjoy the game more, you can give CPS Booster APK Blockman Go a try. If you would rather have a safer, more reliable, and more ethical way to improve your CPS, consider alternatives such as practicing your clicking skills, using a gaming mouse, or joining a training program.
-
Frequently asked questions
-
Here are some frequently asked questions and answers about CPS Booster APK Blockman Go.
-
Is CPS Booster APK Blockman Go safe to use?
-
-
Is CPS Booster APK Blockman Go legal to use?
-
CPS Booster APK Blockman Go may not be legal to use in some countries or regions, since it can violate laws or regulations that prohibit unauthorized or modified apps or software. It may also violate the rules or terms of service of Blockman Go or other block-style games that forbid cheats, hacks, or exploits. Always check the legality of the app in your location before using it, and respect the rights and property of the games' developers and publishers.
-
Is CPS Booster APK Blockman Go detectable by the game?
-
CPS Booster APK Blockman Go may be detectable by the game or by other players, since it can cause abnormal or suspicious behavior or results in your gameplay. For example, it may make your click speed or accuracy too high or too low, or make your click pattern too regular or too random. This can raise flags or alerts that lead to your account being banned or suspended. Always be careful when using the app and avoid using it excessively or blatantly.
-
Does CPS Booster APK Blockman Go work with other games?
-
CPS Booster APK Blockman Go is designed to work with Blockman Go and other block-style games that call for a high CPS. However, it may not work well with games whose mechanics or features interfere with the app's functionality; for example, it may not work with games that have anti-cheat systems, touch controls, or motion sensors. Always test the app with other games before relying on it.
If you want to uninstall CPS Booster APK Blockman Go from your device, follow these steps:
-
-
Go to your device settings and look for the option labeled "Apps" or "Applications".
-
Find and tap "CPS Booster APK" in the list of apps.
-
Tap "Uninstall" and confirm your choice.
-
Wait for the app to be removed from your device.
-
-
\ No newline at end of file
diff --git a/spaces/BetterAPI/BetterChat/src/lib/stores/pendingMessage.ts b/spaces/BetterAPI/BetterChat/src/lib/stores/pendingMessage.ts
deleted file mode 100644
index f28d7aaf9995f9848f6c7988503c20a08d81d97c..0000000000000000000000000000000000000000
--- a/spaces/BetterAPI/BetterChat/src/lib/stores/pendingMessage.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-import { writable } from "svelte/store";
-
-export const pendingMessage = writable("");
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/ec2/createtags.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/ec2/createtags.py
deleted file mode 100644
index ec0ff1a685b1a27a4871d1a505cb9daf331f8a41..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/ec2/createtags.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"). You
-# may not use this file except in compliance with the License. A copy of
-# the License is located at
-#
-# https://aws.amazon.com/apache2.0/
-#
-# or in the "license" file accompanying this file. This file is
-# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-
-
-def inject_create_tags(event_name, class_attributes, **kwargs):
- """This injects a custom create_tags method onto the ec2 service resource
-
- This is needed because the resource model is not able to express
- creating multiple tag resources based on the fact you can apply a set
- of tags to multiple ec2 resources.
- """
- class_attributes['create_tags'] = create_tags
-
-
-def create_tags(self, **kwargs):
- # Call the client method
- self.meta.client.create_tags(**kwargs)
- resources = kwargs.get('Resources', [])
- tags = kwargs.get('Tags', [])
- tag_resources = []
-
- # Generate all of the tag resources that just were created with the
- # preceding client call.
- for resource in resources:
- for tag in tags:
- # Add each tag from the tag set for each resource to the list
- # that is returned by the method.
- tag_resource = self.Tag(resource, tag['Key'], tag['Value'])
- tag_resources.append(tag_resource)
- return tag_resources
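For context, a minimal sketch of how the injected helper above is typically called through the EC2 service resource might look like this; the instance ID and tag values are placeholders, and valid AWS credentials and region configuration are assumed.

```py
import boto3

# Placeholder instance ID; requires valid AWS credentials and a configured region.
ec2 = boto3.resource("ec2")
tags = ec2.create_tags(
    Resources=["i-0123456789abcdef0"],
    Tags=[{"Key": "Name", "Value": "example"}, {"Key": "Team", "Value": "demo"}],
)

# The injected method returns one Tag resource per (resource, tag) pair.
for tag in tags:
    print(tag.resource_id, tag.key, tag.value)
```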
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/install.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/install.py
deleted file mode 100644
index 3c15ed4158c35bc43610aa5745364a0e865434eb..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/install.py
+++ /dev/null
@@ -1,775 +0,0 @@
-import errno
-import json
-import operator
-import os
-import shutil
-import site
-from optparse import SUPPRESS_HELP, Values
-from typing import List, Optional
-
-from pip._vendor.rich import print_json
-
-from pip._internal.cache import WheelCache
-from pip._internal.cli import cmdoptions
-from pip._internal.cli.cmdoptions import make_target_python
-from pip._internal.cli.req_command import (
- RequirementCommand,
- warn_if_run_as_root,
- with_cleanup,
-)
-from pip._internal.cli.status_codes import ERROR, SUCCESS
-from pip._internal.exceptions import CommandError, InstallationError
-from pip._internal.locations import get_scheme
-from pip._internal.metadata import get_environment
-from pip._internal.models.installation_report import InstallationReport
-from pip._internal.operations.build.build_tracker import get_build_tracker
-from pip._internal.operations.check import ConflictDetails, check_install_conflicts
-from pip._internal.req import install_given_reqs
-from pip._internal.req.req_install import (
- InstallRequirement,
- check_legacy_setup_py_options,
-)
-from pip._internal.utils.compat import WINDOWS
-from pip._internal.utils.filesystem import test_writable_dir
-from pip._internal.utils.logging import getLogger
-from pip._internal.utils.misc import (
- check_externally_managed,
- ensure_dir,
- get_pip_version,
- protect_pip_from_modification_on_windows,
- write_output,
-)
-from pip._internal.utils.temp_dir import TempDirectory
-from pip._internal.utils.virtualenv import (
- running_under_virtualenv,
- virtualenv_no_global,
-)
-from pip._internal.wheel_builder import build, should_build_for_install_command
-
-logger = getLogger(__name__)
-
-
-class InstallCommand(RequirementCommand):
- """
- Install packages from:
-
- - PyPI (and other indexes) using requirement specifiers.
- - VCS project urls.
- - Local project directories.
- - Local or remote source archives.
-
- pip also supports installing from "requirements files", which provide
- an easy way to specify a whole environment to be installed.
- """
-
- usage = """
-      %prog [options] <requirement specifier> [package-index-options] ...
-      %prog [options] -r <requirements file> [package-index-options] ...
-      %prog [options] [-e] <vcs project url> ...
-      %prog [options] [-e] <local project path> ...
-      %prog [options] <archive url/path> ..."""
-
- def add_options(self) -> None:
- self.cmd_opts.add_option(cmdoptions.requirements())
- self.cmd_opts.add_option(cmdoptions.constraints())
- self.cmd_opts.add_option(cmdoptions.no_deps())
- self.cmd_opts.add_option(cmdoptions.pre())
-
- self.cmd_opts.add_option(cmdoptions.editable())
- self.cmd_opts.add_option(
- "--dry-run",
- action="store_true",
- dest="dry_run",
- default=False,
- help=(
- "Don't actually install anything, just print what would be. "
- "Can be used in combination with --ignore-installed "
- "to 'resolve' the requirements."
- ),
- )
- self.cmd_opts.add_option(
- "-t",
- "--target",
- dest="target_dir",
- metavar="dir",
- default=None,
- help=(
-                "Install packages into <dir>. "
- "By default this will not replace existing files/folders in "
-                "<dir>. Use --upgrade to replace existing packages in <dir> "
- "with new versions."
- ),
- )
- cmdoptions.add_target_python_options(self.cmd_opts)
-
- self.cmd_opts.add_option(
- "--user",
- dest="use_user_site",
- action="store_true",
- help=(
- "Install to the Python user install directory for your "
- "platform. Typically ~/.local/, or %APPDATA%\\Python on "
- "Windows. (See the Python documentation for site.USER_BASE "
- "for full details.)"
- ),
- )
- self.cmd_opts.add_option(
- "--no-user",
- dest="use_user_site",
- action="store_false",
- help=SUPPRESS_HELP,
- )
- self.cmd_opts.add_option(
- "--root",
- dest="root_path",
- metavar="dir",
- default=None,
- help="Install everything relative to this alternate root directory.",
- )
- self.cmd_opts.add_option(
- "--prefix",
- dest="prefix_path",
- metavar="dir",
- default=None,
- help=(
- "Installation prefix where lib, bin and other top-level "
- "folders are placed. Note that the resulting installation may "
- "contain scripts and other resources which reference the "
- "Python interpreter of pip, and not that of ``--prefix``. "
- "See also the ``--python`` option if the intention is to "
- "install packages into another (possibly pip-free) "
- "environment."
- ),
- )
-
- self.cmd_opts.add_option(cmdoptions.src())
-
- self.cmd_opts.add_option(
- "-U",
- "--upgrade",
- dest="upgrade",
- action="store_true",
- help=(
- "Upgrade all specified packages to the newest available "
- "version. The handling of dependencies depends on the "
- "upgrade-strategy used."
- ),
- )
-
- self.cmd_opts.add_option(
- "--upgrade-strategy",
- dest="upgrade_strategy",
- default="only-if-needed",
- choices=["only-if-needed", "eager"],
- help=(
- "Determines how dependency upgrading should be handled "
- "[default: %default]. "
- '"eager" - dependencies are upgraded regardless of '
- "whether the currently installed version satisfies the "
- "requirements of the upgraded package(s). "
- '"only-if-needed" - are upgraded only when they do not '
- "satisfy the requirements of the upgraded package(s)."
- ),
- )
-
- self.cmd_opts.add_option(
- "--force-reinstall",
- dest="force_reinstall",
- action="store_true",
- help="Reinstall all packages even if they are already up-to-date.",
- )
-
- self.cmd_opts.add_option(
- "-I",
- "--ignore-installed",
- dest="ignore_installed",
- action="store_true",
- help=(
- "Ignore the installed packages, overwriting them. "
- "This can break your system if the existing package "
- "is of a different version or was installed "
- "with a different package manager!"
- ),
- )
-
- self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
- self.cmd_opts.add_option(cmdoptions.no_build_isolation())
- self.cmd_opts.add_option(cmdoptions.use_pep517())
- self.cmd_opts.add_option(cmdoptions.no_use_pep517())
- self.cmd_opts.add_option(cmdoptions.check_build_deps())
- self.cmd_opts.add_option(cmdoptions.override_externally_managed())
-
- self.cmd_opts.add_option(cmdoptions.config_settings())
- self.cmd_opts.add_option(cmdoptions.global_options())
-
- self.cmd_opts.add_option(
- "--compile",
- action="store_true",
- dest="compile",
- default=True,
- help="Compile Python source files to bytecode",
- )
-
- self.cmd_opts.add_option(
- "--no-compile",
- action="store_false",
- dest="compile",
- help="Do not compile Python source files to bytecode",
- )
-
- self.cmd_opts.add_option(
- "--no-warn-script-location",
- action="store_false",
- dest="warn_script_location",
- default=True,
- help="Do not warn when installing scripts outside PATH",
- )
- self.cmd_opts.add_option(
- "--no-warn-conflicts",
- action="store_false",
- dest="warn_about_conflicts",
- default=True,
- help="Do not warn about broken dependencies",
- )
- self.cmd_opts.add_option(cmdoptions.no_binary())
- self.cmd_opts.add_option(cmdoptions.only_binary())
- self.cmd_opts.add_option(cmdoptions.prefer_binary())
- self.cmd_opts.add_option(cmdoptions.require_hashes())
- self.cmd_opts.add_option(cmdoptions.progress_bar())
- self.cmd_opts.add_option(cmdoptions.root_user_action())
-
- index_opts = cmdoptions.make_option_group(
- cmdoptions.index_group,
- self.parser,
- )
-
- self.parser.insert_option_group(0, index_opts)
- self.parser.insert_option_group(0, self.cmd_opts)
-
- self.cmd_opts.add_option(
- "--report",
- dest="json_report_file",
- metavar="file",
- default=None,
- help=(
- "Generate a JSON file describing what pip did to install "
- "the provided requirements. "
- "Can be used in combination with --dry-run and --ignore-installed "
- "to 'resolve' the requirements. "
- "When - is used as file name it writes to stdout. "
- "When writing to stdout, please combine with the --quiet option "
- "to avoid mixing pip logging output with JSON output."
- ),
- )
-
- @with_cleanup
- def run(self, options: Values, args: List[str]) -> int:
- if options.use_user_site and options.target_dir is not None:
- raise CommandError("Can not combine '--user' and '--target'")
-
- # Check whether the environment we're installing into is externally
- # managed, as specified in PEP 668. Specifying --root, --target, or
- # --prefix disables the check, since there's no reliable way to locate
- # the EXTERNALLY-MANAGED file for those cases. An exception is also
- # made specifically for "--dry-run --report" for convenience.
- installing_into_current_environment = (
- not (options.dry_run and options.json_report_file)
- and options.root_path is None
- and options.target_dir is None
- and options.prefix_path is None
- )
- if (
- installing_into_current_environment
- and not options.override_externally_managed
- ):
- check_externally_managed()
-
- upgrade_strategy = "to-satisfy-only"
- if options.upgrade:
- upgrade_strategy = options.upgrade_strategy
-
- cmdoptions.check_dist_restriction(options, check_target=True)
-
- logger.verbose("Using %s", get_pip_version())
- options.use_user_site = decide_user_install(
- options.use_user_site,
- prefix_path=options.prefix_path,
- target_dir=options.target_dir,
- root_path=options.root_path,
- isolated_mode=options.isolated_mode,
- )
-
- target_temp_dir: Optional[TempDirectory] = None
- target_temp_dir_path: Optional[str] = None
- if options.target_dir:
- options.ignore_installed = True
- options.target_dir = os.path.abspath(options.target_dir)
- if (
- # fmt: off
- os.path.exists(options.target_dir) and
- not os.path.isdir(options.target_dir)
- # fmt: on
- ):
- raise CommandError(
- "Target path exists but is not a directory, will not continue."
- )
-
- # Create a target directory for using with the target option
- target_temp_dir = TempDirectory(kind="target")
- target_temp_dir_path = target_temp_dir.path
- self.enter_context(target_temp_dir)
-
- global_options = options.global_options or []
-
- session = self.get_default_session(options)
-
- target_python = make_target_python(options)
- finder = self._build_package_finder(
- options=options,
- session=session,
- target_python=target_python,
- ignore_requires_python=options.ignore_requires_python,
- )
- build_tracker = self.enter_context(get_build_tracker())
-
- directory = TempDirectory(
- delete=not options.no_clean,
- kind="install",
- globally_managed=True,
- )
-
- try:
- reqs = self.get_requirements(args, options, finder, session)
- check_legacy_setup_py_options(options, reqs)
-
- wheel_cache = WheelCache(options.cache_dir)
-
- # Only when installing is it permitted to use PEP 660.
- # In other circumstances (pip wheel, pip download) we generate
- # regular (i.e. non editable) metadata and wheels.
- for req in reqs:
- req.permit_editable_wheels = True
-
- preparer = self.make_requirement_preparer(
- temp_build_dir=directory,
- options=options,
- build_tracker=build_tracker,
- session=session,
- finder=finder,
- use_user_site=options.use_user_site,
- verbosity=self.verbosity,
- )
- resolver = self.make_resolver(
- preparer=preparer,
- finder=finder,
- options=options,
- wheel_cache=wheel_cache,
- use_user_site=options.use_user_site,
- ignore_installed=options.ignore_installed,
- ignore_requires_python=options.ignore_requires_python,
- force_reinstall=options.force_reinstall,
- upgrade_strategy=upgrade_strategy,
- use_pep517=options.use_pep517,
- )
-
- self.trace_basic_info(finder)
-
- requirement_set = resolver.resolve(
- reqs, check_supported_wheels=not options.target_dir
- )
-
- if options.json_report_file:
- report = InstallationReport(requirement_set.requirements_to_install)
- if options.json_report_file == "-":
- print_json(data=report.to_dict())
- else:
- with open(options.json_report_file, "w", encoding="utf-8") as f:
- json.dump(report.to_dict(), f, indent=2, ensure_ascii=False)
-
- if options.dry_run:
- would_install_items = sorted(
- (r.metadata["name"], r.metadata["version"])
- for r in requirement_set.requirements_to_install
- )
- if would_install_items:
- write_output(
- "Would install %s",
- " ".join("-".join(item) for item in would_install_items),
- )
- return SUCCESS
-
- try:
- pip_req = requirement_set.get_requirement("pip")
- except KeyError:
- modifying_pip = False
- else:
- # If we're not replacing an already installed pip,
- # we're not modifying it.
- modifying_pip = pip_req.satisfied_by is None
- protect_pip_from_modification_on_windows(modifying_pip=modifying_pip)
-
- reqs_to_build = [
- r
- for r in requirement_set.requirements.values()
- if should_build_for_install_command(r)
- ]
-
- _, build_failures = build(
- reqs_to_build,
- wheel_cache=wheel_cache,
- verify=True,
- build_options=[],
- global_options=global_options,
- )
-
- if build_failures:
- raise InstallationError(
- "Could not build wheels for {}, which is required to "
- "install pyproject.toml-based projects".format(
- ", ".join(r.name for r in build_failures) # type: ignore
- )
- )
-
- to_install = resolver.get_installation_order(requirement_set)
-
- # Check for conflicts in the package set we're installing.
- conflicts: Optional[ConflictDetails] = None
- should_warn_about_conflicts = (
- not options.ignore_dependencies and options.warn_about_conflicts
- )
- if should_warn_about_conflicts:
- conflicts = self._determine_conflicts(to_install)
-
- # Don't warn about script install locations if
- # --target or --prefix has been specified
- warn_script_location = options.warn_script_location
- if options.target_dir or options.prefix_path:
- warn_script_location = False
-
- installed = install_given_reqs(
- to_install,
- global_options,
- root=options.root_path,
- home=target_temp_dir_path,
- prefix=options.prefix_path,
- warn_script_location=warn_script_location,
- use_user_site=options.use_user_site,
- pycompile=options.compile,
- )
-
- lib_locations = get_lib_location_guesses(
- user=options.use_user_site,
- home=target_temp_dir_path,
- root=options.root_path,
- prefix=options.prefix_path,
- isolated=options.isolated_mode,
- )
- env = get_environment(lib_locations)
-
- installed.sort(key=operator.attrgetter("name"))
- items = []
- for result in installed:
- item = result.name
- try:
- installed_dist = env.get_distribution(item)
- if installed_dist is not None:
- item = f"{item}-{installed_dist.version}"
- except Exception:
- pass
- items.append(item)
-
- if conflicts is not None:
- self._warn_about_conflicts(
- conflicts,
- resolver_variant=self.determine_resolver_variant(options),
- )
-
- installed_desc = " ".join(items)
- if installed_desc:
- write_output(
- "Successfully installed %s",
- installed_desc,
- )
- except OSError as error:
- show_traceback = self.verbosity >= 1
-
- message = create_os_error_message(
- error,
- show_traceback,
- options.use_user_site,
- )
- logger.error(message, exc_info=show_traceback) # noqa
-
- return ERROR
-
- if options.target_dir:
- assert target_temp_dir
- self._handle_target_dir(
- options.target_dir, target_temp_dir, options.upgrade
- )
- if options.root_user_action == "warn":
- warn_if_run_as_root()
- return SUCCESS
-
- def _handle_target_dir(
- self, target_dir: str, target_temp_dir: TempDirectory, upgrade: bool
- ) -> None:
- ensure_dir(target_dir)
-
- # Checking both purelib and platlib directories for installed
- # packages to be moved to target directory
- lib_dir_list = []
-
- # Checking both purelib and platlib directories for installed
- # packages to be moved to target directory
- scheme = get_scheme("", home=target_temp_dir.path)
- purelib_dir = scheme.purelib
- platlib_dir = scheme.platlib
- data_dir = scheme.data
-
- if os.path.exists(purelib_dir):
- lib_dir_list.append(purelib_dir)
- if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
- lib_dir_list.append(platlib_dir)
- if os.path.exists(data_dir):
- lib_dir_list.append(data_dir)
-
- for lib_dir in lib_dir_list:
- for item in os.listdir(lib_dir):
- if lib_dir == data_dir:
- ddir = os.path.join(data_dir, item)
- if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
- continue
- target_item_dir = os.path.join(target_dir, item)
- if os.path.exists(target_item_dir):
- if not upgrade:
- logger.warning(
- "Target directory %s already exists. Specify "
- "--upgrade to force replacement.",
- target_item_dir,
- )
- continue
- if os.path.islink(target_item_dir):
- logger.warning(
- "Target directory %s already exists and is "
- "a link. pip will not automatically replace "
- "links, please remove if replacement is "
- "desired.",
- target_item_dir,
- )
- continue
- if os.path.isdir(target_item_dir):
- shutil.rmtree(target_item_dir)
- else:
- os.remove(target_item_dir)
-
- shutil.move(os.path.join(lib_dir, item), target_item_dir)
-
- def _determine_conflicts(
- self, to_install: List[InstallRequirement]
- ) -> Optional[ConflictDetails]:
- try:
- return check_install_conflicts(to_install)
- except Exception:
- logger.exception(
- "Error while checking for conflicts. Please file an issue on "
- "pip's issue tracker: https://github.com/pypa/pip/issues/new"
- )
- return None
-
- def _warn_about_conflicts(
- self, conflict_details: ConflictDetails, resolver_variant: str
- ) -> None:
- package_set, (missing, conflicting) = conflict_details
- if not missing and not conflicting:
- return
-
- parts: List[str] = []
- if resolver_variant == "legacy":
- parts.append(
- "pip's legacy dependency resolver does not consider dependency "
- "conflicts when selecting packages. This behaviour is the "
- "source of the following dependency conflicts."
- )
- else:
- assert resolver_variant == "2020-resolver"
- parts.append(
- "pip's dependency resolver does not currently take into account "
- "all the packages that are installed. This behaviour is the "
- "source of the following dependency conflicts."
- )
-
- # NOTE: There is some duplication here, with commands/check.py
- for project_name in missing:
- version = package_set[project_name][0]
- for dependency in missing[project_name]:
- message = (
- "{name} {version} requires {requirement}, "
- "which is not installed."
- ).format(
- name=project_name,
- version=version,
- requirement=dependency[1],
- )
- parts.append(message)
-
- for project_name in conflicting:
- version = package_set[project_name][0]
- for dep_name, dep_version, req in conflicting[project_name]:
- message = (
- "{name} {version} requires {requirement}, but {you} have "
- "{dep_name} {dep_version} which is incompatible."
- ).format(
- name=project_name,
- version=version,
- requirement=req,
- dep_name=dep_name,
- dep_version=dep_version,
- you=("you" if resolver_variant == "2020-resolver" else "you'll"),
- )
- parts.append(message)
-
- logger.critical("\n".join(parts))
-
-
-def get_lib_location_guesses(
- user: bool = False,
- home: Optional[str] = None,
- root: Optional[str] = None,
- isolated: bool = False,
- prefix: Optional[str] = None,
-) -> List[str]:
- scheme = get_scheme(
- "",
- user=user,
- home=home,
- root=root,
- isolated=isolated,
- prefix=prefix,
- )
- return [scheme.purelib, scheme.platlib]
-
-
-def site_packages_writable(root: Optional[str], isolated: bool) -> bool:
- return all(
- test_writable_dir(d)
- for d in set(get_lib_location_guesses(root=root, isolated=isolated))
- )
-
-
-def decide_user_install(
- use_user_site: Optional[bool],
- prefix_path: Optional[str] = None,
- target_dir: Optional[str] = None,
- root_path: Optional[str] = None,
- isolated_mode: bool = False,
-) -> bool:
- """Determine whether to do a user install based on the input options.
-
- If use_user_site is False, no additional checks are done.
- If use_user_site is True, it is checked for compatibility with other
- options.
- If use_user_site is None, the default behaviour depends on the environment,
- which is provided by the other arguments.
- """
- # In some cases (config from tox), use_user_site can be set to an integer
- # rather than a bool, which 'use_user_site is False' wouldn't catch.
- if (use_user_site is not None) and (not use_user_site):
- logger.debug("Non-user install by explicit request")
- return False
-
- if use_user_site:
- if prefix_path:
- raise CommandError(
- "Can not combine '--user' and '--prefix' as they imply "
- "different installation locations"
- )
- if virtualenv_no_global():
- raise InstallationError(
- "Can not perform a '--user' install. User site-packages "
- "are not visible in this virtualenv."
- )
- logger.debug("User install by explicit request")
- return True
-
- # If we are here, user installs have not been explicitly requested/avoided
- assert use_user_site is None
-
- # user install incompatible with --prefix/--target
- if prefix_path or target_dir:
- logger.debug("Non-user install due to --prefix or --target option")
- return False
-
- # If user installs are not enabled, choose a non-user install
- if not site.ENABLE_USER_SITE:
- logger.debug("Non-user install because user site-packages disabled")
- return False
-
- # If we have permission for a non-user install, do that,
- # otherwise do a user install.
- if site_packages_writable(root=root_path, isolated=isolated_mode):
- logger.debug("Non-user install because site-packages writeable")
- return False
-
- logger.info(
- "Defaulting to user installation because normal site-packages "
- "is not writeable"
- )
- return True
-
-
-def create_os_error_message(
- error: OSError, show_traceback: bool, using_user_site: bool
-) -> str:
- """Format an error message for an OSError
-
- It may occur anytime during the execution of the install command.
- """
- parts = []
-
- # Mention the error if we are not going to show a traceback
- parts.append("Could not install packages due to an OSError")
- if not show_traceback:
- parts.append(": ")
- parts.append(str(error))
- else:
- parts.append(".")
-
-    # Split the error indication from a helper message (if any)
- parts[-1] += "\n"
-
- # Suggest useful actions to the user:
- # (1) using user site-packages or (2) verifying the permissions
- if error.errno == errno.EACCES:
- user_option_part = "Consider using the `--user` option"
- permissions_part = "Check the permissions"
-
- if not running_under_virtualenv() and not using_user_site:
- parts.extend(
- [
- user_option_part,
- " or ",
- permissions_part.lower(),
- ]
- )
- else:
- parts.append(permissions_part)
- parts.append(".\n")
-
- # Suggest the user to enable Long Paths if path length is
- # more than 260
- if (
- WINDOWS
- and error.errno == errno.ENOENT
- and error.filename
- and len(error.filename) > 260
- ):
- parts.append(
- "HINT: This error might have occurred since "
- "this system does not have Windows Long Path "
- "support enabled. You can find information on "
- "how to enable this at "
- "https://pip.pypa.io/warnings/enable-long-paths\n"
- )
-
- return "".join(parts).strip() + "\n"
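As a usage note, the --dry-run and --report options defined above can be combined to resolve requirements without installing anything; the sketch below is one way to drive that from Python, assuming a pip version that ships this JSON report format (the field names follow pip's installation report and may differ across versions).

```py
import json
import subprocess
import sys

# "--report -" writes the JSON installation report to stdout; "--quiet" keeps
# pip's normal logging out of it, as the option help above advises.
result = subprocess.run(
    [sys.executable, "-m", "pip", "install", "--dry-run", "--ignore-installed",
     "--quiet", "--report", "-", "requests"],
    check=True, capture_output=True, text=True,
)

report = json.loads(result.stdout)
for item in report.get("install", []):
    meta = item.get("metadata", {})
    print(meta.get("name"), meta.get("version"))
```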
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/api.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/api.py
deleted file mode 100644
index f140e8b6db8fe4d373b539c0576ca28b45e837aa..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/api.py
+++ /dev/null
@@ -1,179 +0,0 @@
-from __future__ import annotations
-
-import os
-import sys
-from abc import ABC, abstractmethod
-from pathlib import Path
-
-if sys.version_info >= (3, 8): # pragma: no branch
- from typing import Literal # pragma: no cover
-
-
-class PlatformDirsABC(ABC):
- """
- Abstract base class for platform directories.
- """
-
- def __init__(
- self,
- appname: str | None = None,
- appauthor: str | None | Literal[False] = None,
- version: str | None = None,
- roaming: bool = False,
- multipath: bool = False,
- opinion: bool = True,
- ensure_exists: bool = False,
- ):
- """
- Create a new platform directory.
-
- :param appname: See `appname`.
- :param appauthor: See `appauthor`.
- :param version: See `version`.
- :param roaming: See `roaming`.
- :param multipath: See `multipath`.
- :param opinion: See `opinion`.
- :param ensure_exists: See `ensure_exists`.
- """
- self.appname = appname #: The name of application.
- self.appauthor = appauthor
- """
- The name of the app author or distributing body for this application. Typically, it is the owning company name.
- Defaults to `appname`. You may pass ``False`` to disable it.
- """
- self.version = version
- """
- An optional version path element to append to the path. You might want to use this if you want multiple versions
-        of your app to be able to run independently. If used, this would typically be ``<major>.<minor>``.
- """
- self.roaming = roaming
- """
- Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup
- for roaming profiles, this user data will be synced on login (see
- `here `_).
- """
- self.multipath = multipath
- """
- An optional parameter only applicable to Unix/Linux which indicates that the entire list of data dirs should be
- returned. By default, the first item would only be returned.
- """
- self.opinion = opinion #: A flag to indicating to use opinionated values.
- self.ensure_exists = ensure_exists
- """
- Optionally create the directory (and any missing parents) upon access if it does not exist.
- By default, no directories are created.
- """
-
- def _append_app_name_and_version(self, *base: str) -> str:
- params = list(base[1:])
- if self.appname:
- params.append(self.appname)
- if self.version:
- params.append(self.version)
- path = os.path.join(base[0], *params)
- self._optionally_create_directory(path)
- return path
-
- def _optionally_create_directory(self, path: str) -> None:
- if self.ensure_exists:
- Path(path).mkdir(parents=True, exist_ok=True)
-
- @property
- @abstractmethod
- def user_data_dir(self) -> str:
- """:return: data directory tied to the user"""
-
- @property
- @abstractmethod
- def site_data_dir(self) -> str:
- """:return: data directory shared by users"""
-
- @property
- @abstractmethod
- def user_config_dir(self) -> str:
- """:return: config directory tied to the user"""
-
- @property
- @abstractmethod
- def site_config_dir(self) -> str:
- """:return: config directory shared by the users"""
-
- @property
- @abstractmethod
- def user_cache_dir(self) -> str:
- """:return: cache directory tied to the user"""
-
- @property
- @abstractmethod
- def site_cache_dir(self) -> str:
- """:return: cache directory shared by users"""
-
- @property
- @abstractmethod
- def user_state_dir(self) -> str:
- """:return: state directory tied to the user"""
-
- @property
- @abstractmethod
- def user_log_dir(self) -> str:
- """:return: log directory tied to the user"""
-
- @property
- @abstractmethod
- def user_documents_dir(self) -> str:
- """:return: documents directory tied to the user"""
-
- @property
- @abstractmethod
- def user_runtime_dir(self) -> str:
- """:return: runtime directory tied to the user"""
-
- @property
- def user_data_path(self) -> Path:
- """:return: data path tied to the user"""
- return Path(self.user_data_dir)
-
- @property
- def site_data_path(self) -> Path:
- """:return: data path shared by users"""
- return Path(self.site_data_dir)
-
- @property
- def user_config_path(self) -> Path:
- """:return: config path tied to the user"""
- return Path(self.user_config_dir)
-
- @property
- def site_config_path(self) -> Path:
- """:return: config path shared by the users"""
- return Path(self.site_config_dir)
-
- @property
- def user_cache_path(self) -> Path:
- """:return: cache path tied to the user"""
- return Path(self.user_cache_dir)
-
- @property
- def site_cache_path(self) -> Path:
- """:return: cache path shared by users"""
- return Path(self.site_cache_dir)
-
- @property
- def user_state_path(self) -> Path:
- """:return: state path tied to the user"""
- return Path(self.user_state_dir)
-
- @property
- def user_log_path(self) -> Path:
- """:return: log path tied to the user"""
- return Path(self.user_log_dir)
-
- @property
- def user_documents_path(self) -> Path:
- """:return: documents path tied to the user"""
- return Path(self.user_documents_dir)
-
- @property
- def user_runtime_path(self) -> Path:
- """:return: runtime path tied to the user"""
- return Path(self.user_runtime_dir)
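As a usage note, the abstract properties above are what the concrete per-platform classes expose; a minimal sketch with the public platformdirs package (rather than pip's vendored copy) might look like this, with the application name and author as placeholders.

```py
from platformdirs import PlatformDirs

# Placeholder application metadata.
dirs = PlatformDirs(appname="MyApp", appauthor="MyCompany", version="1.0")

print(dirs.user_data_dir)    # per-user data directory for MyApp 1.0
print(dirs.user_cache_dir)   # per-user cache directory
print(dirs.site_config_dir)  # machine-wide config directory
print(dirs.user_log_path)    # same location as user_log_dir, but as a pathlib.Path
```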
diff --git a/spaces/CCaniggia/GPT/README.md b/spaces/CCaniggia/GPT/README.md
deleted file mode 100644
index d9ec9dfe5bfcc04d8532828d0e214ebeb12feb3c..0000000000000000000000000000000000000000
--- a/spaces/CCaniggia/GPT/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: GPT
-emoji: 🌍
-colorFrom: yellow
-colorTo: pink
-sdk: docker
-pinned: false
-license: mit
-app_port: 8080
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/attention.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/attention.py
deleted file mode 100644
index 2fc9e8cde53005a2aaa9e42e58922b488376fa8a..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/attention.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import torch
-import torch.nn as nn
-from torch.nn.utils.weight_norm import weight_norm
-from fc import FCNet
-
-
-class Attention(nn.Module):
- def __init__(self, v_dim, q_dim, num_hid):
- super(Attention, self).__init__()
- self.nonlinear = FCNet([v_dim + q_dim, num_hid])
- self.linear = weight_norm(nn.Linear(num_hid, 1), dim=None)
-
- def forward(self, v, q):
- """
- v: [batch, k, vdim]
- q: [batch, qdim]
- """
- logits = self.logits(v, q)
- w = nn.functional.softmax(logits, 1)
- return w
-
- def logits(self, v, q):
- num_objs = v.size(1)
- q = q.unsqueeze(1).repeat(1, num_objs, 1)
- vq = torch.cat((v, q), 2)
- joint_repr = self.nonlinear(vq)
- logits = self.linear(joint_repr)
- return logits
-
-
-class NewAttention(nn.Module):
- def __init__(self, v_dim, q_dim, num_hid, dropout=0.2):
- super(NewAttention, self).__init__()
-
- self.v_proj = FCNet([v_dim, num_hid])
- self.q_proj = FCNet([q_dim, num_hid])
- self.dropout = nn.Dropout(dropout)
- self.linear = weight_norm(nn.Linear(q_dim, 1), dim=None)
-
- def forward(self, v, q):
- """
- v: [batch, k, vdim]
- q: [batch, qdim]
- """
- logits = self.logits(v, q)
- w = nn.functional.softmax(logits, 1)
- return w
-
- def logits(self, v, q):
- batch, k, _ = v.size()
- v_proj = self.v_proj(v) # [batch, k, qdim]
- q_proj = self.q_proj(q).unsqueeze(1).repeat(1, k, 1)
- joint_repr = v_proj * q_proj
- joint_repr = self.dropout(joint_repr)
- logits = self.linear(joint_repr)
- return logits
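For orientation, a minimal forward pass through the NewAttention module above might look like the sketch below; it assumes the repository's fc.FCNet is importable next to this file, the dimensions are illustrative, and (as the constructor is written) q_dim must equal num_hid for the final linear layer to line up.

```py
import torch
from attention import NewAttention  # module shown above; needs the repo's fc.FCNet

batch, k = 4, 36                     # e.g. 36 detected regions per image
v_dim, q_dim, num_hid = 2048, 1024, 1024

att = NewAttention(v_dim, q_dim, num_hid)
v = torch.randn(batch, k, v_dim)     # region features
q = torch.randn(batch, q_dim)        # question embedding

w = att(v, q)                        # softmax weights over the k regions
print(w.shape)                       # torch.Size([4, 36, 1])

attended = (w * v).sum(dim=1)        # attention-weighted sum of region features
print(attended.shape)                # torch.Size([4, 2048])
```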
diff --git a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/minimum_system.h b/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/minimum_system.h
deleted file mode 100644
index 45b5a592fc5796892c143bf677ca988d788ec20d..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/minimum_system.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-namespace thrust
-{
-namespace detail
-{
-
-
-template<typename... Systems>
- struct unrelated_systems {};
-
-
-// if a minimum system exists for these arguments, return it
-// otherwise, collect the arguments and report them as unrelated
-template<typename... Systems>
- struct minimum_system
- : thrust::detail::eval_if<
- is_metafunction_defined<
-          minimum_type<Systems...>
- >::value,
-        minimum_type<Systems...>,
- thrust::detail::identity_<
-          unrelated_systems<Systems...>
- >
- >
-{}; // end minimum_system
-
-
-} // end detail
-} // end thrust
-
diff --git a/spaces/CVPR/WALT/mmdet/core/evaluation/recall.py b/spaces/CVPR/WALT/mmdet/core/evaluation/recall.py
deleted file mode 100644
index 23ec744f552db1a4a76bfa63b7cc8b357deb3140..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/core/evaluation/recall.py
+++ /dev/null
@@ -1,189 +0,0 @@
-from collections.abc import Sequence
-
-import numpy as np
-from mmcv.utils import print_log
-from terminaltables import AsciiTable
-
-from .bbox_overlaps import bbox_overlaps
-
-
-def _recalls(all_ious, proposal_nums, thrs):
-
- img_num = all_ious.shape[0]
- total_gt_num = sum([ious.shape[0] for ious in all_ious])
-
- _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
- for k, proposal_num in enumerate(proposal_nums):
- tmp_ious = np.zeros(0)
- for i in range(img_num):
- ious = all_ious[i][:, :proposal_num].copy()
- gt_ious = np.zeros((ious.shape[0]))
- if ious.size == 0:
- tmp_ious = np.hstack((tmp_ious, gt_ious))
- continue
- for j in range(ious.shape[0]):
- gt_max_overlaps = ious.argmax(axis=1)
- max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
- gt_idx = max_ious.argmax()
- gt_ious[j] = max_ious[gt_idx]
- box_idx = gt_max_overlaps[gt_idx]
- ious[gt_idx, :] = -1
- ious[:, box_idx] = -1
- tmp_ious = np.hstack((tmp_ious, gt_ious))
- _ious[k, :] = tmp_ious
-
- _ious = np.fliplr(np.sort(_ious, axis=1))
- recalls = np.zeros((proposal_nums.size, thrs.size))
- for i, thr in enumerate(thrs):
- recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)
-
- return recalls
-
-
-def set_recall_param(proposal_nums, iou_thrs):
- """Check proposal_nums and iou_thrs and set correct format."""
- if isinstance(proposal_nums, Sequence):
- _proposal_nums = np.array(proposal_nums)
- elif isinstance(proposal_nums, int):
- _proposal_nums = np.array([proposal_nums])
- else:
- _proposal_nums = proposal_nums
-
- if iou_thrs is None:
- _iou_thrs = np.array([0.5])
- elif isinstance(iou_thrs, Sequence):
- _iou_thrs = np.array(iou_thrs)
- elif isinstance(iou_thrs, float):
- _iou_thrs = np.array([iou_thrs])
- else:
- _iou_thrs = iou_thrs
-
- return _proposal_nums, _iou_thrs
-
-
-def eval_recalls(gts,
- proposals,
- proposal_nums=None,
- iou_thrs=0.5,
- logger=None):
- """Calculate recalls.
-
- Args:
- gts (list[ndarray]): a list of arrays of shape (n, 4)
- proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5)
- proposal_nums (int | Sequence[int]): Top N proposals to be evaluated.
- iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5.
- logger (logging.Logger | str | None): The way to print the recall
- summary. See `mmcv.utils.print_log()` for details. Default: None.
-
- Returns:
- ndarray: recalls of different ious and proposal nums
- """
-
- img_num = len(gts)
- assert img_num == len(proposals)
-
- proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)
-
- all_ious = []
- for i in range(img_num):
- if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
- scores = proposals[i][:, 4]
- sort_idx = np.argsort(scores)[::-1]
- img_proposal = proposals[i][sort_idx, :]
- else:
- img_proposal = proposals[i]
- prop_num = min(img_proposal.shape[0], proposal_nums[-1])
- if gts[i] is None or gts[i].shape[0] == 0:
- ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
- else:
- ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4])
- all_ious.append(ious)
- all_ious = np.array(all_ious)
- recalls = _recalls(all_ious, proposal_nums, iou_thrs)
-
- print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger)
- return recalls
-
-
-def print_recall_summary(recalls,
- proposal_nums,
- iou_thrs,
- row_idxs=None,
- col_idxs=None,
- logger=None):
- """Print recalls in a table.
-
- Args:
- recalls (ndarray): calculated from `bbox_recalls`
- proposal_nums (ndarray or list): top N proposals
- iou_thrs (ndarray or list): iou thresholds
- row_idxs (ndarray): which rows(proposal nums) to print
- col_idxs (ndarray): which cols(iou thresholds) to print
- logger (logging.Logger | str | None): The way to print the recall
- summary. See `mmcv.utils.print_log()` for details. Default: None.
- """
- proposal_nums = np.array(proposal_nums, dtype=np.int32)
- iou_thrs = np.array(iou_thrs)
- if row_idxs is None:
- row_idxs = np.arange(proposal_nums.size)
- if col_idxs is None:
- col_idxs = np.arange(iou_thrs.size)
- row_header = [''] + iou_thrs[col_idxs].tolist()
- table_data = [row_header]
- for i, num in enumerate(proposal_nums[row_idxs]):
- row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()]
- row.insert(0, num)
- table_data.append(row)
- table = AsciiTable(table_data)
- print_log('\n' + table.table, logger=logger)
-
-
-def plot_num_recall(recalls, proposal_nums):
- """Plot Proposal_num-Recalls curve.
-
- Args:
- recalls(ndarray or list): shape (k,)
- proposal_nums(ndarray or list): same shape as `recalls`
- """
- if isinstance(proposal_nums, np.ndarray):
- _proposal_nums = proposal_nums.tolist()
- else:
- _proposal_nums = proposal_nums
- if isinstance(recalls, np.ndarray):
- _recalls = recalls.tolist()
- else:
- _recalls = recalls
-
- import matplotlib.pyplot as plt
- f = plt.figure()
- plt.plot([0] + _proposal_nums, [0] + _recalls)
- plt.xlabel('Proposal num')
- plt.ylabel('Recall')
- plt.axis([0, proposal_nums.max(), 0, 1])
- f.show()
-
-
-def plot_iou_recall(recalls, iou_thrs):
- """Plot IoU-Recalls curve.
-
- Args:
- recalls(ndarray or list): shape (k,)
- iou_thrs(ndarray or list): same shape as `recalls`
- """
- if isinstance(iou_thrs, np.ndarray):
- _iou_thrs = iou_thrs.tolist()
- else:
- _iou_thrs = iou_thrs
- if isinstance(recalls, np.ndarray):
- _recalls = recalls.tolist()
- else:
- _recalls = recalls
-
- import matplotlib.pyplot as plt
- f = plt.figure()
- plt.plot(_iou_thrs + [1.0], _recalls + [0.])
- plt.xlabel('IoU')
- plt.ylabel('Recall')
- plt.axis([iou_thrs.min(), 1, 0, 1])
- f.show()
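For orientation, a small synthetic call to eval_recalls above could look like the sketch below; it assumes the surrounding mmdet/mmcv/terminaltables dependencies are installed, and the boxes are made up.

```py
import numpy as np
from mmdet.core.evaluation import eval_recalls  # the module shown above

# One image: a single ground-truth box and two scored proposals (x1, y1, x2, y2, score).
gts = [np.array([[10, 10, 50, 50]], dtype=np.float32)]
proposals = [np.array([[12, 12, 48, 48, 0.9],
                       [100, 100, 150, 150, 0.5]], dtype=np.float32)]

recalls = eval_recalls(gts, proposals, proposal_nums=[1, 2], iou_thrs=[0.5, 0.75])
print(recalls)  # shape (2, 2): rows are proposal counts, columns are IoU thresholds
```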
diff --git a/spaces/Catmeow/Face2Painting_From_Photo/README.md b/spaces/Catmeow/Face2Painting_From_Photo/README.md
deleted file mode 100644
index 53ef3797b6cdd0bbd5000e26f81ba540013aeec4..0000000000000000000000000000000000000000
--- a/spaces/Catmeow/Face2Painting_From_Photo/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Face to Hand-painted style From Photo
-emoji: 👨🦱 👩🦰
-colorFrom: yellow
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.8.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/setup.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/setup.py
deleted file mode 100644
index bfa68201b62bf67230a61fb1ecb00d1ab0ef0631..0000000000000000000000000000000000000000
--- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/setup.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""Set up the AI and its goals"""
-from colorama import Fore, Style
-
-from autogpt import utils
-from autogpt.config.ai_config import AIConfig
-from autogpt.logs import logger
-
-
-def prompt_user() -> AIConfig:
- """Prompt the user for input
-
- Returns:
- AIConfig: The AIConfig object containing the user's input
- """
- ai_name = ""
- # Construct the prompt
- logger.typewriter_log(
- "Welcome to Auto-GPT! ",
- Fore.GREEN,
- "run with '--help' for more information.",
- speak_text=True,
- )
-
- logger.typewriter_log(
- "Create an AI-Assistant:",
- Fore.GREEN,
- "Enter the name of your AI and its role below. Entering nothing will load"
- " defaults.",
- speak_text=True,
- )
-
- # Get AI Name from User
- logger.typewriter_log(
- "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
- )
- ai_name = utils.clean_input("AI Name: ")
- if ai_name == "":
- ai_name = "Entrepreneur-GPT"
-
- logger.typewriter_log(
- f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
- )
-
- # Get AI Role from User
- logger.typewriter_log(
- "Describe your AI's role: ",
- Fore.GREEN,
- "For example, 'an AI designed to autonomously develop and run businesses with"
- " the sole goal of increasing your net worth.'",
- )
- ai_role = utils.clean_input(f"{ai_name} is: ")
- if ai_role == "":
-        ai_role = ("an AI designed to autonomously develop and run businesses with the"
-                   " sole goal of increasing your net worth.")
-
- # Enter up to 5 goals for the AI
- logger.typewriter_log(
- "Enter up to 5 goals for your AI: ",
- Fore.GREEN,
- "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
- " multiple businesses autonomously'",
- )
- print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
- ai_goals = []
- for i in range(5):
- ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
- if ai_goal == "":
- break
- ai_goals.append(ai_goal)
- if not ai_goals:
- ai_goals = [
- "Increase net worth",
- "Grow Twitter Account",
- "Develop and manage multiple businesses autonomously",
- ]
-
- return AIConfig(ai_name, ai_role, ai_goals)
diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/config/paths_catalog.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/config/paths_catalog.py
deleted file mode 100644
index 0ad2540e07c484eff858363003f3015a5f99713d..0000000000000000000000000000000000000000
--- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/config/paths_catalog.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-"""Centralized catalog of paths."""
-
-import os
-
-class DatasetCatalog(object):
- DATA_DIR = "/home/zhangbq/ws/ct/dataset/"
- DATASETS = {
- "ic15_train": (
- "ic15/ic15_train_images",
- "ic15/annotations/ic15_train.json"
- ),
- "ic15_test": (
- "ic15/ic15_test_images",
- "ic15/annotations/ic15_test.json"
- ),
- "CTW1500_train": (
- "ctw/ctw_train_images",
- "ctw/annotations/ctw_train.json"
- ),
- "CTW1500_test": (
- "ctw/ctw_test_images",
- "ctw/annotations/ctw_test.json"
- )
-
-}
-
- @staticmethod
- def get(name):
- data_dir = DatasetCatalog.DATA_DIR
- attrs = DatasetCatalog.DATASETS[name]
- if "coco" in name:
- args = dict(
- root=os.path.join(data_dir, attrs["img_dir"]),
- ann_file=os.path.join(data_dir, attrs["ann_file"]),
- )
- return dict(
- factory="COCODataset",
- args=args,
- )
- elif "voc" in name:
- args = dict(
- data_dir=os.path.join(data_dir, attrs["data_dir"]),
- split=attrs["split"],
- )
- return dict(
- factory="PascalVOCDataset",
- args=args,
- )
- elif True:
- args = dict(
- root=os.path.join(data_dir, attrs[0]),
- ann_file=os.path.join(data_dir, attrs[1]),
- )
- return dict(
- factory="WordDataset",
- args=args,
- )
- raise RuntimeError("Dataset not available: {}".format(name))
-
-
-class ModelCatalog(object):
- S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
- C2_IMAGENET_MODELS = {
- "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
- "MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
- "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
- "MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
- "FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
- }
-
- C2_DETECTRON_SUFFIX = "output/train/{}coco_2014_train%3A{}coco_2014_valminusminival/generalized_rcnn/model_final.pkl"
- C2_DETECTRON_MODELS = {
- "35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
- "35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
- "35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
- "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
- "35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
- "35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
- "35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
- "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
- "37129812/e2e_mask_rcnn_X-152-32x8d-FPN-IN5k_1.44x": "09_35_36.8pzTQKYK",
- # keypoints
- "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "08_42_54.kdzV35ao"
- }
-
- @staticmethod
- def get(name):
- if name.startswith("Caffe2Detectron/COCO"):
- return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
- if name.startswith("ImageNetPretrained"):
- return ModelCatalog.get_c2_imagenet_pretrained(name)
- raise RuntimeError("model not present in the catalog {}".format(name))
-
- @staticmethod
- def get_c2_imagenet_pretrained(name):
- prefix = ModelCatalog.S3_C2_DETECTRON_URL
- name = name[len("ImageNetPretrained/"):]
- name = ModelCatalog.C2_IMAGENET_MODELS[name]
- url = "/".join([prefix, name])
- return url
-
- @staticmethod
- def get_c2_detectron_12_2017_baselines(name):
- # Detectron C2 models are stored following the structure
-        # prefix/<model_id>/2012_2017_baselines/<model_name>.yaml.<signature>/suffix
-        # we use as identifiers in the catalog Caffe2Detectron/COCO/<model_id>/<model_name>
- prefix = ModelCatalog.S3_C2_DETECTRON_URL
- dataset_tag = "keypoints_" if "keypoint" in name else ""
- suffix = ModelCatalog.C2_DETECTRON_SUFFIX.format(dataset_tag, dataset_tag)
- # remove identification prefix
- name = name[len("Caffe2Detectron/COCO/"):]
- # split in <model_id> and <model_name>
- model_id, model_name = name.split("/")
- # parsing to make it match the url address from the Caffe2 models
- model_name = "{}.yaml".format(model_name)
- signature = ModelCatalog.C2_DETECTRON_MODELS[name]
- unique_name = ".".join([model_name, signature])
- url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix])
- return url
diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/conversation/__init__.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/conversation/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImtImagePlugin.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImtImagePlugin.py
deleted file mode 100644
index ac267457b0682a975a1a33da475c96531c398bd7..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImtImagePlugin.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# IM Tools support for PIL
-#
-# history:
-# 1996-05-27 fl Created (read 8-bit images only)
-# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.2)
-#
-# Copyright (c) Secret Labs AB 1997-2001.
-# Copyright (c) Fredrik Lundh 1996-2001.
-#
-# See the README file for information on usage and redistribution.
-#
-
-
-import re
-
-from . import Image, ImageFile
-
-#
-# --------------------------------------------------------------------
-
-field = re.compile(rb"([a-z]*) ([^ \r\n]*)")
-
-
-##
-# Image plugin for IM Tools images.
-
-
-class ImtImageFile(ImageFile.ImageFile):
- format = "IMT"
- format_description = "IM Tools"
-
- def _open(self):
- # Quick rejection: if there's not a LF among the first
- # 100 bytes, this is (probably) not a text header.
-
- buffer = self.fp.read(100)
- if b"\n" not in buffer:
- msg = "not an IM file"
- raise SyntaxError(msg)
-
- xsize = ysize = 0
-
- while True:
- if buffer:
- s = buffer[:1]
- buffer = buffer[1:]
- else:
- s = self.fp.read(1)
- if not s:
- break
-
- if s == b"\x0C":
- # image data begins
- self.tile = [
- (
- "raw",
- (0, 0) + self.size,
- self.fp.tell() - len(buffer),
- (self.mode, 0, 1),
- )
- ]
-
- break
-
- else:
- # read key/value pair
- if b"\n" not in buffer:
- buffer += self.fp.read(100)
- lines = buffer.split(b"\n")
- s += lines.pop(0)
- buffer = b"\n".join(lines)
- if len(s) == 1 or len(s) > 100:
- break
- if s[0] == ord(b"*"):
- continue # comment
-
- m = field.match(s)
- if not m:
- break
- k, v = m.group(1, 2)
- if k == b"width":
- xsize = int(v)
- self._size = xsize, ysize
- elif k == b"height":
- ysize = int(v)
- self._size = xsize, ysize
- elif k == b"pixel" and v == b"n8":
- self.mode = "L"
-
-
-#
-# --------------------------------------------------------------------
-
-Image.register_open(ImtImageFile.format, ImtImageFile)
-
-#
-# no extension registered (".im" is simply too common)
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Upload-9bb55fba.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Upload-9bb55fba.js
deleted file mode 100644
index b891290a5d0af3ee834d412ae00c0b908ddfb490..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Upload-9bb55fba.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as H,e as I,s as J,a9 as L,N as A,O as V,K as o,U as F,p as W,M as B,Q as f,Y as m,af as b,ab as X,ac as Z,ad as x,z as $,v as ee,A as ae,a1 as le,B as te,F as y,h as ie}from"./index-1d65707a.js";import{b as ne}from"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";function re(l){let a,n,r,c,g,u,i,k,z;const v=l[15].default,d=L(v,l,l[14],null);return{c(){a=A("div"),d&&d.c(),n=V(),r=A("input"),o(r,"type","file"),o(r,"accept",l[0]),r.multiple=c=l[4]==="multiple"||void 0,o(r,"webkitdirectory",g=l[4]==="directory"||void 0),o(r,"mozdirectory",u=l[4]==="directory"||void 0),o(r,"class","svelte-116rqfv"),o(a,"class","svelte-116rqfv"),F(a,"center",l[2]),F(a,"boundedheight",l[1]),F(a,"flex",l[3])},m(t,s){W(t,a,s),d&&d.m(a,null),B(a,n),B(a,r),l[23](r),i=!0,k||(z=[f(r,"change",l[8]),f(a,"drag",m(b(l[16]))),f(a,"dragstart",m(b(l[17]))),f(a,"dragend",m(b(l[18]))),f(a,"dragover",m(b(l[19]))),f(a,"dragenter",m(b(l[20]))),f(a,"dragleave",m(b(l[21]))),f(a,"drop",m(b(l[22]))),f(a,"click",l[7]),f(a,"drop",l[9]),f(a,"dragenter",l[6]),f(a,"dragleave",l[6])],k=!0)},p(t,[s]){d&&d.p&&(!i||s&16384)&&X(d,v,t,t[14],i?x(v,t[14],s,null):Z(t[14]),null),(!i||s&1)&&o(r,"accept",t[0]),(!i||s&16&&c!==(c=t[4]==="multiple"||void 0))&&(r.multiple=c),(!i||s&16&&g!==(g=t[4]==="directory"||void 0))&&o(r,"webkitdirectory",g),(!i||s&16&&u!==(u=t[4]==="directory"||void 0))&&o(r,"mozdirectory",u),(!i||s&4)&&F(a,"center",t[2]),(!i||s&2)&&F(a,"boundedheight",t[1]),(!i||s&8)&&F(a,"flex",t[3])},i(t){i||($(d,t),i=!0)},o(t){ee(d,t),i=!1},d(t){t&&ae(a),d&&d.d(t),l[23](null),k=!1,le(z)}}}function de(l,a,n){let{$$slots:r={},$$scope:c}=a,{filetype:g=null}=a,{include_file_metadata:u=!0}=a,{dragging:i=!1}=a,{boundedheight:k=!0}=a,{center:z=!0}=a,{flex:v=!0}=a,{file_count:d="single"}=a,{disable_click:t=!1}=a,{parse_to_data_url:s=!0}=a,w;const S=te(),C=()=>{n(10,i=!i)},E=()=>{t||(n(5,w.value="",w),w.click())},D=async e=>{let h=Array.from(e);if(!(!e.length||!window.FileReader)){if(d==="single"&&(h=[e[0]]),u)var T=h.map(_=>({name:_.name,size:_.size}));var p=[],U=[];s?U=await Promise.all(h.map(_=>ne(_))):U=h,u?s?p=U.map((_,q)=>({data:_,...T[q]})):p=U.map((_,q)=>({data:"",blob:_,...T[q]})):p=U,S("load",d==="single"?p[0]:p)}},K=async e=>{const h=e.target;h.files&&await D(h.files)},M=async e=>{n(10,i=!1),e.dataTransfer?.files&&await D(e.dataTransfer.files)};function N(e){y.call(this,l,e)}function O(e){y.call(this,l,e)}function P(e){y.call(this,l,e)}function Q(e){y.call(this,l,e)}function R(e){y.call(this,l,e)}function Y(e){y.call(this,l,e)}function j(e){y.call(this,l,e)}function G(e){ie[e?"unshift":"push"](()=>{w=e,n(5,w)})}return l.$$set=e=>{"filetype"in e&&n(0,g=e.filetype),"include_file_metadata"in e&&n(11,u=e.include_file_metadata),"dragging"in e&&n(10,i=e.dragging),"boundedheight"in e&&n(1,k=e.boundedheight),"center"in e&&n(2,z=e.center),"flex"in e&&n(3,v=e.flex),"file_count"in e&&n(4,d=e.file_count),"disable_click"in e&&n(12,t=e.disable_click),"parse_to_data_url"in e&&n(13,s=e.parse_to_data_url),"$$scope"in e&&n(14,c=e.$$scope)},[g,k,z,v,d,w,C,E,K,M,i,u,t,s,c,r,N,O,P,Q,R,Y,j,G]}class ue extends H{constructor(a){super(),I(this,a,de,re,J,{filetype:0,include_file_metadata:11,dragging:10,boundedheight:1,center:2,flex:3,file_count:4,disable_click:12,parse_to_data_url:13})}}export{ue as U};
-//# sourceMappingURL=Upload-9bb55fba.js.map
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/linear-58a44b5e.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/linear-58a44b5e.js
deleted file mode 100644
index 5957ab4a575538fb9023ff2dbfffc2cab1f1743e..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/linear-58a44b5e.js
+++ /dev/null
@@ -1,2 +0,0 @@
-function W(n,t){return n==null||t==null?NaN:nt?1:n>=t?0:NaN}function En(n){let t=n,e=n,r=n;n.length!==2&&(t=(a,u)=>n(a)-u,e=W,r=(a,u)=>W(n(a),u));function i(a,u,s=0,c=a.length){if(s>>1;r(a[h],u)<0?s=h+1:c=h}while(s>>1;r(a[h],u)<=0?s=h+1:c=h}while(ss&&t(a[h-1],u)>-t(a[h],u)?h-1:h}return{left:i,center:o,right:f}}function Un(n){return n===null?NaN:+n}function*Qt(n,t){if(t===void 0)for(let e of n)e!=null&&(e=+e)>=e&&(yield e);else{let e=-1;for(let r of n)(r=t(r,++e,n))!=null&&(r=+r)>=r&&(yield r)}}const Pn=En(W),Yn=Pn.right,Ut=Pn.left;En(Un).center;const Jn=Yn;var nn=Math.sqrt(50),tn=Math.sqrt(10),en=Math.sqrt(2);function Kn(n,t,e){var r,i=-1,f,o,a;if(t=+t,n=+n,e=+e,n===t&&e>0)return[n];if((r=t0){let u=Math.round(n/a),s=Math.round(t/a);for(u*at&&--s,o=new Array(f=s-u+1);++it&&--s,o=new Array(f=s-u+1);++i=0?(f>=nn?10:f>=tn?5:f>=en?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(f>=nn?10:f>=tn?5:f>=en?2:1)}function Wn(n,t,e){var r=Math.abs(t-n)/Math.max(0,e),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),f=r/i;return f>=nn?i*=10:f>=tn?i*=5:f>=en&&(i*=2),t=1e21?n.toLocaleString("en").replace(/,/g,""):n.toString(10)}function G(n,t){if((e=(n=t?n.toExponential(t-1):n.toExponential()).indexOf("e"))<0)return null;var e,r=n.slice(0,e);return[r.length>1?r[0]+r.slice(2):r,+n.slice(e+1)]}function L(n){return n=G(Math.abs(n)),n?n[1]:NaN}function tt(n,t){return function(e,r){for(var i=e.length,f=[],o=0,a=n[0],u=0;i>0&&a>0&&(u+a+1>r&&(a=Math.max(1,r-u)),f.push(e.substring(i-=a,i+a)),!((u+=a+1)>r));)a=n[o=(o+1)%n.length];return f.reverse().join(t)}}function et(n){return function(t){return t.replace(/[0-9]/g,function(e){return n[+e]})}}var rt=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Z(n){if(!(t=rt.exec(n)))throw new Error("invalid format: "+n);var t;return new sn({fill:t[1],align:t[2],sign:t[3],symbol:t[4],zero:t[5],width:t[6],comma:t[7],precision:t[8]&&t[8].slice(1),trim:t[9],type:t[10]})}Z.prototype=sn.prototype;function sn(n){this.fill=n.fill===void 0?" 
":n.fill+"",this.align=n.align===void 0?">":n.align+"",this.sign=n.sign===void 0?"-":n.sign+"",this.symbol=n.symbol===void 0?"":n.symbol+"",this.zero=!!n.zero,this.width=n.width===void 0?void 0:+n.width,this.comma=!!n.comma,this.precision=n.precision===void 0?void 0:+n.precision,this.trim=!!n.trim,this.type=n.type===void 0?"":n.type+""}sn.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(this.width===void 0?"":Math.max(1,this.width|0))+(this.comma?",":"")+(this.precision===void 0?"":"."+Math.max(0,this.precision|0))+(this.trim?"~":"")+this.type};function it(n){n:for(var t=n.length,e=1,r=-1,i;e0&&(r=0);break}return r>0?n.slice(0,r)+n.slice(i+1):n}var qn;function at(n,t){var e=G(n,t);if(!e)return n+"";var r=e[0],i=e[1],f=i-(qn=Math.max(-8,Math.min(8,Math.floor(i/3)))*3)+1,o=r.length;return f===o?r:f>o?r+new Array(f-o+1).join("0"):f>0?r.slice(0,f)+"."+r.slice(f):"0."+new Array(1-f).join("0")+G(n,Math.max(0,t+f-1))[0]}function xn(n,t){var e=G(n,t);if(!e)return n+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}const mn={"%":(n,t)=>(n*100).toFixed(t),b:n=>Math.round(n).toString(2),c:n=>n+"",d:nt,e:(n,t)=>n.toExponential(t),f:(n,t)=>n.toFixed(t),g:(n,t)=>n.toPrecision(t),o:n=>Math.round(n).toString(8),p:(n,t)=>xn(n*100,t),r:xn,s:at,X:n=>Math.round(n).toString(16).toUpperCase(),x:n=>Math.round(n).toString(16)};function bn(n){return n}var pn=Array.prototype.map,yn=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];function ft(n){var t=n.grouping===void 0||n.thousands===void 0?bn:tt(pn.call(n.grouping,Number),n.thousands+""),e=n.currency===void 0?"":n.currency[0]+"",r=n.currency===void 0?"":n.currency[1]+"",i=n.decimal===void 0?".":n.decimal+"",f=n.numerals===void 0?bn:et(pn.call(n.numerals,String)),o=n.percent===void 0?"%":n.percent+"",a=n.minus===void 0?"−":n.minus+"",u=n.nan===void 0?"NaN":n.nan+"";function s(h){h=Z(h);var l=h.fill,p=h.align,g=h.sign,k=h.symbol,v=h.zero,N=h.width,R=h.comma,y=h.precision,H=h.trim,m=h.type;m==="n"?(R=!0,m="g"):mn[m]||(y===void 0&&(y=12),H=!0,m="g"),(v||l==="0"&&p==="=")&&(v=!0,l="0",p="=");var Vn=k==="$"?e:k==="#"&&/[boxX]/.test(m)?"0"+m.toLowerCase():"",Xn=k==="$"?r:/[%p]/.test(m)?o:"",ln=mn[m],Qn=/[defgprs%]/.test(m);y=y===void 0?6:/[gprs]/.test(m)?Math.max(1,Math.min(21,y)):Math.max(0,Math.min(20,y));function dn(d){var A=Vn,b=Xn,E,gn,F;if(m==="c")b=ln(d)+b,d="";else{d=+d;var $=d<0||1/d<0;if(d=isNaN(d)?u:ln(Math.abs(d),y),H&&(d=it(d)),$&&+d==0&&g!=="+"&&($=!1),A=($?g==="("?g:a:g==="-"||g==="("?"":g)+A,b=(m==="s"?yn[8+qn/3]:"")+b+($&&g==="("?")":""),Qn){for(E=-1,gn=d.length;++EF||F>57){b=(F===46?i+d.slice(E+1):d.slice(E))+b,d=d.slice(0,E);break}}}R&&!v&&(d=t(d,1/0));var B=A.length+d.length+b.length,_=B>1)+A+d+b+_.slice(B);break;default:d=_+A+d+b;break}return f(d)}return dn.toString=function(){return h+""},dn}function c(h,l){var p=s((h=Z(h),h.type="f",h)),g=Math.max(-8,Math.min(8,Math.floor(L(l)/3)))*3,k=Math.pow(10,-g),v=yn[8+g/3];return function(N){return p(k*N)+v}}return{format:s,formatPrefix:c}}var D,Ln,Hn;ot({thousands:",",grouping:[3],currency:["$",""]});function ot(n){return D=ft(n),Ln=D.format,Hn=D.formatPrefix,D}function ut(n){return Math.max(0,-L(Math.abs(n)))}function st(n,t){return Math.max(0,Math.max(-8,Math.min(8,Math.floor(L(t)/3)))*3-L(Math.abs(n)))}function ht(n,t){return n=Math.abs(n),t=Math.abs(t)-n,Math.max(0,L(t)-L(n))+1}const rn=Math.PI,an=2*rn,S=1e-6,ct=an-S;function 
fn(){this._x0=this._y0=this._x1=this._y1=null,this._=""}function In(){return new fn}fn.prototype=In.prototype={constructor:fn,moveTo:function(n,t){this._+="M"+(this._x0=this._x1=+n)+","+(this._y0=this._y1=+t)},closePath:function(){this._x1!==null&&(this._x1=this._x0,this._y1=this._y0,this._+="Z")},lineTo:function(n,t){this._+="L"+(this._x1=+n)+","+(this._y1=+t)},quadraticCurveTo:function(n,t,e,r){this._+="Q"+ +n+","+ +t+","+(this._x1=+e)+","+(this._y1=+r)},bezierCurveTo:function(n,t,e,r,i,f){this._+="C"+ +n+","+ +t+","+ +e+","+ +r+","+(this._x1=+i)+","+(this._y1=+f)},arcTo:function(n,t,e,r,i){n=+n,t=+t,e=+e,r=+r,i=+i;var f=this._x1,o=this._y1,a=e-n,u=r-t,s=f-n,c=o-t,h=s*s+c*c;if(i<0)throw new Error("negative radius: "+i);if(this._x1===null)this._+="M"+(this._x1=n)+","+(this._y1=t);else if(h>S)if(!(Math.abs(c*a-u*s)>S)||!i)this._+="L"+(this._x1=n)+","+(this._y1=t);else{var l=e-f,p=r-o,g=a*a+u*u,k=l*l+p*p,v=Math.sqrt(g),N=Math.sqrt(h),R=i*Math.tan((rn-Math.acos((g+h-k)/(2*v*N)))/2),y=R/N,H=R/v;Math.abs(y-1)>S&&(this._+="L"+(n+y*s)+","+(t+y*c)),this._+="A"+i+","+i+",0,0,"+ +(c*l>s*p)+","+(this._x1=n+H*a)+","+(this._y1=t+H*u)}},arc:function(n,t,e,r,i,f){n=+n,t=+t,e=+e,f=!!f;var o=e*Math.cos(r),a=e*Math.sin(r),u=n+o,s=t+a,c=1^f,h=f?r-i:i-r;if(e<0)throw new Error("negative radius: "+e);this._x1===null?this._+="M"+u+","+s:(Math.abs(this._x1-u)>S||Math.abs(this._y1-s)>S)&&(this._+="L"+u+","+s),e&&(h<0&&(h=h%an+an),h>ct?this._+="A"+e+","+e+",0,1,"+c+","+(n-o)+","+(t-a)+"A"+e+","+e+",0,1,"+c+","+(this._x1=u)+","+(this._y1=s):h>S&&(this._+="A"+e+","+e+",0,"+ +(h>=rn)+","+c+","+(this._x1=n+e*Math.cos(i))+","+(this._y1=t+e*Math.sin(i))))},rect:function(n,t,e,r){this._+="M"+(this._x0=this._x1=+n)+","+(this._y0=this._y1=+t)+"h"+ +e+"v"+ +r+"h"+-e+"Z"},toString:function(){return this._}};function P(n){return function(){return n}}function lt(n){return typeof n=="object"&&"length"in n?n:Array.from(n)}function Tn(n){this._context=n}Tn.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){(this._line||this._line!==0&&this._point===1)&&this._context.closePath(),this._line=1-this._line},point:function(n,t){switch(n=+n,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(n,t):this._context.moveTo(n,t);break;case 1:this._point=2;default:this._context.lineTo(n,t);break}}};function dt(n){return new Tn(n)}function gt(n){return n[0]}function xt(n){return n[1]}function Yt(n,t){var e=P(!0),r=null,i=dt,f=null;n=typeof n=="function"?n:n===void 0?gt:P(n),t=typeof t=="function"?t:t===void 0?xt:P(t);function o(a){var u,s=(a=lt(a)).length,c,h=!1,l;for(r==null&&(f=i(l=In())),u=0;u<=s;++u)!(u>8&15|t>>4&240,t>>4&15|t&240,(t&15)<<4|t&15,1):e===8?O(t>>24&255,t>>16&255,t>>8&255,(t&255)/255):e===4?O(t>>12&15|t>>8&240,t>>8&15|t>>4&240,t>>4&15|t&240,((t&15)<<4|t&15)/255):null):(t=pt.exec(n))?new x(t[1],t[2],t[3],1):(t=yt.exec(n))?new x(t[1]*255/100,t[2]*255/100,t[3]*255/100,1):(t=wt.exec(n))?O(t[1],t[2],t[3],t[4]):(t=Mt.exec(n))?O(t[1]*255/100,t[2]*255/100,t[3]*255/100,t[4]):(t=vt.exec(n))?An(t[1],t[2]/100,t[3]/100,1):(t=_t.exec(n))?An(t[1],t[2]/100,t[3]/100,t[4]):wn.hasOwnProperty(n)?_n(wn[n]):n==="transparent"?new x(NaN,NaN,NaN,0):null}function _n(n){return new x(n>>16&255,n>>8&255,n&255,1)}function O(n,t,e,r){return r<=0&&(n=t=e=NaN),new x(n,t,e,r)}function kt(n){return n instanceof C||(n=z(n)),n?(n=n.rgb(),new x(n.r,n.g,n.b,n.opacity)):new x}function X(n,t,e,r){return arguments.length===1?kt(n):new x(n,t,e,r??1)}function 
x(n,t,e,r){this.r=+n,this.g=+t,this.b=+e,this.opacity=+r}hn(x,X,zn(C,{brighter:function(n){return n=n==null?V:Math.pow(V,n),new x(this.r*n,this.g*n,this.b*n,this.opacity)},darker:function(n){return n=n==null?I:Math.pow(I,n),new x(this.r*n,this.g*n,this.b*n,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:Nn,formatHex:Nn,formatRgb:kn,toString:kn}));function Nn(){return"#"+Y(this.r)+Y(this.g)+Y(this.b)}function kn(){var n=this.opacity;return n=isNaN(n)?1:Math.max(0,Math.min(1,n)),(n===1?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(n===1?")":", "+n+")")}function Y(n){return n=Math.max(0,Math.min(255,Math.round(n)||0)),(n<16?"0":"")+n.toString(16)}function An(n,t,e,r){return r<=0?n=t=e=NaN:e<=0||e>=1?n=t=NaN:t<=0&&(n=NaN),new w(n,t,e,r)}function Cn(n){if(n instanceof w)return new w(n.h,n.s,n.l,n.opacity);if(n instanceof C||(n=z(n)),!n)return new w;if(n instanceof w)return n;n=n.rgb();var t=n.r/255,e=n.g/255,r=n.b/255,i=Math.min(t,e,r),f=Math.max(t,e,r),o=NaN,a=f-i,u=(f+i)/2;return a?(t===f?o=(e-r)/a+(e0&&u<1?0:o,new w(o,a,u,n.opacity)}function At(n,t,e,r){return arguments.length===1?Cn(n):new w(n,t,e,r??1)}function w(n,t,e,r){this.h=+n,this.s=+t,this.l=+e,this.opacity=+r}hn(w,At,zn(C,{brighter:function(n){return n=n==null?V:Math.pow(V,n),new w(this.h,this.s,this.l*n,this.opacity)},darker:function(n){return n=n==null?I:Math.pow(I,n),new w(this.h,this.s,this.l*n,this.opacity)},rgb:function(){var n=this.h%360+(this.h<0)*360,t=isNaN(n)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*t,i=2*e-r;return new x(J(n>=240?n-240:n+120,i,r),J(n,i,r),J(n<120?n+240:n-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var n=this.opacity;return n=isNaN(n)?1:Math.max(0,Math.min(1,n)),(n===1?"hsl(":"hsla(")+(this.h||0)+", "+(this.s||0)*100+"%, "+(this.l||0)*100+"%"+(n===1?")":", "+n+")")}}));function J(n,t,e){return(n<60?t+(e-t)*n/60:n<180?e:n<240?t+(e-t)*(240-n)/60:t)*255}function Fn(n,t,e,r,i){var f=n*n,o=f*n;return((1-3*n+3*f-o)*t+(4-6*f+3*o)*e+(1+3*n+3*f-3*o)*r+o*i)/6}function St(n){var t=n.length-1;return function(e){var r=e<=0?e=0:e>=1?(e=1,t-1):Math.floor(e*t),i=n[r],f=n[r+1],o=r>0?n[r-1]:2*i-f,a=r()=>n;function $n(n,t){return function(e){return n+e*t}}function Et(n,t,e){return n=Math.pow(n,e),t=Math.pow(t,e)-n,e=1/e,function(r){return Math.pow(n+r*t,e)}}function Kt(n,t){var e=t-n;return e?$n(n,e>180||e<-180?e-360*Math.round(e/360):e):U(isNaN(n)?t:n)}function Pt(n){return(n=+n)==1?Bn:function(t,e){return e-t?Et(t,e,n):U(isNaN(t)?e:t)}}function Bn(n,t){var e=t-n;return e?$n(n,e):U(isNaN(n)?t:n)}const Sn=function n(t){var e=Pt(t);function r(i,f){var o=e((i=X(i)).r,(f=X(f)).r),a=e(i.g,f.g),u=e(i.b,f.b),s=Bn(i.opacity,f.opacity);return function(c){return i.r=o(c),i.g=a(c),i.b=u(c),i.opacity=s(c),i+""}}return r.gamma=n,r}(1);function Dn(n){return function(t){var e=t.length,r=new Array(e),i=new Array(e),f=new Array(e),o,a;for(o=0;oe&&(f=t.slice(e,f),a[o]?a[o]+=f:a[++o]=f),(r=r[0])===(i=i[0])?a[o]?a[o]+=i:a[++o]=i:(a[++o]=null,u.push({i:o,x:Q(r,i)})),e=K.lastIndex;return et&&(e=n,n=t,t=e),function(r){return Math.max(n,Math.min(t,r))}}function $t(n,t,e){var r=n[0],i=n[1],f=t[0],o=t[1];return i2?Bt:$t,u=s=null,h}function h(l){return 
l==null||isNaN(l=+l)?f:(u||(u=a(n.map(r),t,e)))(r(o(l)))}return h.invert=function(l){return o(i((s||(s=a(t,n.map(r),Q)))(l)))},h.domain=function(l){return arguments.length?(n=Array.from(l,Ct),c()):n.slice()},h.range=function(l){return arguments.length?(t=Array.from(l),c()):t.slice()},h.rangeRound=function(l){return t=Array.from(l),e=Tt,c()},h.clamp=function(l){return arguments.length?(o=l?!0:j,c()):o!==j},h.interpolate=function(l){return arguments.length?(e=l,c()):e},h.unknown=function(l){return arguments.length?(f=l,h):f},function(l,p){return r=l,i=p,c()}}function Gt(){return Ot()(j,j)}function Zt(n,t,e,r){var i=Wn(n,t,e),f;switch(r=Z(r??",f"),r.type){case"s":{var o=Math.max(Math.abs(n),Math.abs(t));return r.precision==null&&!isNaN(f=st(i,o))&&(r.precision=f),Hn(r,o)}case"":case"e":case"g":case"p":case"r":{r.precision==null&&!isNaN(f=ht(i,Math.max(Math.abs(n),Math.abs(t))))&&(r.precision=f-(r.type==="e"));break}case"f":case"%":{r.precision==null&&!isNaN(f=ut(i))&&(r.precision=f-(r.type==="%")*2);break}}return Ln(r)}function Vt(n){var t=n.domain;return n.ticks=function(e){var r=t();return Kn(r[0],r[r.length-1],e??10)},n.tickFormat=function(e,r){var i=t();return Zt(i[0],i[i.length-1],e??10,r)},n.nice=function(e){e==null&&(e=10);var r=t(),i=0,f=r.length-1,o=r[i],a=r[f],u,s,c=10;for(a0;){if(s=jn(o,a,e),s===u)return r[i]=o,r[f]=a,t(r);if(s>0)o=Math.floor(o/s)*s,a=Math.ceil(a/s)*s;else if(s<0)o=Math.ceil(o*s)/s,a=Math.floor(a*s)/s;else break;u=s}return n},n}function Xt(){var n=Gt();return n.copy=function(){return Dt(n,Xt())},mt.apply(n,arguments),Vt(n)}export{Yn as $,At as A,Bn as B,C,cn as D,te as E,St as F,Rt as G,jt as H,On as I,qt as J,Sn as K,Wt as L,ne as M,Tt as N,It as O,Ct as P,Vt as Q,x as R,Ot as S,Dt as T,Kn as U,j as V,Jn as W,Gt as X,Jt as Y,Xt as Z,Yt as _,W as a,Zt as a0,X as a1,Ut as a2,Un as b,En as c,ht as d,st as e,Z as f,Ln as g,Hn as h,ft as i,P as j,In as k,dt as l,lt as m,Qt as n,mt as o,ut as p,hn as q,kt as r,zn as s,Wn as t,V as u,I as v,Kt as w,gt as x,xt as y,Q as z};
-//# sourceMappingURL=linear-58a44b5e.js.map
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_main.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_main.py
deleted file mode 100644
index 7c12ce841d377ba019da8fc3cb5b35b773317cc7..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_main.py
+++ /dev/null
@@ -1,506 +0,0 @@
-import functools
-import json
-import sys
-import typing
-
-import click
-import httpcore
-import pygments.lexers
-import pygments.util
-import rich.console
-import rich.markup
-import rich.progress
-import rich.syntax
-import rich.table
-
-from ._client import Client
-from ._exceptions import RequestError
-from ._models import Response
-from ._status_codes import codes
-
-
-def print_help() -> None:
- console = rich.console.Console()
-
- console.print("[bold]HTTPX :butterfly:", justify="center")
- console.print()
- console.print("A next generation HTTP client.", justify="center")
- console.print()
- console.print(
- "Usage: [bold]httpx[/bold] [cyan] [OPTIONS][/cyan] ", justify="left"
- )
- console.print()
-
- table = rich.table.Table.grid(padding=1, pad_edge=True)
- table.add_column("Parameter", no_wrap=True, justify="left", style="bold")
- table.add_column("Description")
- table.add_row(
- "-m, --method [cyan]METHOD",
- "Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD.\n"
- "[Default: GET, or POST if a request body is included]",
- )
- table.add_row(
- "-p, --params [cyan] ...",
- "Query parameters to include in the request URL.",
- )
- table.add_row(
- "-c, --content [cyan]TEXT", "Byte content to include in the request body."
- )
- table.add_row(
- "-d, --data [cyan] ...", "Form data to include in the request body."
- )
- table.add_row(
- "-f, --files [cyan] ...",
- "Form files to include in the request body.",
- )
- table.add_row("-j, --json [cyan]TEXT", "JSON data to include in the request body.")
- table.add_row(
- "-h, --headers [cyan] ...",
- "Include additional HTTP headers in the request.",
- )
- table.add_row(
- "--cookies [cyan] ...", "Cookies to include in the request."
- )
- table.add_row(
- "--auth [cyan]",
- "Username and password to include in the request. Specify '-' for the password to use "
- "a password prompt. Note that using --verbose/-v will expose the Authorization "
- "header, including the password encoding in a trivially reversible format.",
- )
-
- table.add_row(
- "--proxies [cyan]URL",
- "Send the request via a proxy. Should be the URL giving the proxy address.",
- )
-
- table.add_row(
- "--timeout [cyan]FLOAT",
- "Timeout value to use for network operations, such as establishing the connection, "
- "reading some data, etc... [Default: 5.0]",
- )
-
- table.add_row("--follow-redirects", "Automatically follow redirects.")
- table.add_row("--no-verify", "Disable SSL verification.")
- table.add_row(
- "--http2", "Send the request using HTTP/2, if the remote server supports it."
- )
-
- table.add_row(
- "--download [cyan]FILE",
- "Save the response content as a file, rather than displaying it.",
- )
-
- table.add_row("-v, --verbose", "Verbose output. Show request as well as response.")
- table.add_row("--help", "Show this message and exit.")
- console.print(table)
-
-
-def get_lexer_for_response(response: Response) -> str:
- content_type = response.headers.get("Content-Type")
- if content_type is not None:
- mime_type, _, _ = content_type.partition(";")
- try:
- return typing.cast(
- str, pygments.lexers.get_lexer_for_mimetype(mime_type.strip()).name
- )
- except pygments.util.ClassNotFound: # pragma: no cover
- pass
- return "" # pragma: no cover
-
-
-def format_request_headers(request: httpcore.Request, http2: bool = False) -> str:
- version = "HTTP/2" if http2 else "HTTP/1.1"
- headers = [
- (name.lower() if http2 else name, value) for name, value in request.headers
- ]
- method = request.method.decode("ascii")
- target = request.url.target.decode("ascii")
- lines = [f"{method} {target} {version}"] + [
- f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers
- ]
- return "\n".join(lines)
-
-
-def format_response_headers(
- http_version: bytes,
- status: int,
- reason_phrase: typing.Optional[bytes],
- headers: typing.List[typing.Tuple[bytes, bytes]],
-) -> str:
- version = http_version.decode("ascii")
- reason = (
- codes.get_reason_phrase(status)
- if reason_phrase is None
- else reason_phrase.decode("ascii")
- )
- lines = [f"{version} {status} {reason}"] + [
- f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers
- ]
- return "\n".join(lines)
-
-
-def print_request_headers(request: httpcore.Request, http2: bool = False) -> None:
- console = rich.console.Console()
- http_text = format_request_headers(request, http2=http2)
- syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True)
- console.print(syntax)
- syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True)
- console.print(syntax)
-
-
-def print_response_headers(
- http_version: bytes,
- status: int,
- reason_phrase: typing.Optional[bytes],
- headers: typing.List[typing.Tuple[bytes, bytes]],
-) -> None:
- console = rich.console.Console()
- http_text = format_response_headers(http_version, status, reason_phrase, headers)
- syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True)
- console.print(syntax)
- syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True)
- console.print(syntax)
-
-
-def print_response(response: Response) -> None:
- console = rich.console.Console()
- lexer_name = get_lexer_for_response(response)
- if lexer_name:
- if lexer_name.lower() == "json":
- try:
- data = response.json()
- text = json.dumps(data, indent=4)
- except ValueError: # pragma: no cover
- text = response.text
- else:
- text = response.text
-
- syntax = rich.syntax.Syntax(text, lexer_name, theme="ansi_dark", word_wrap=True)
- console.print(syntax)
- else:
- console.print(f"<{len(response.content)} bytes of binary data>")
-
-
-_PCTRTT = typing.Tuple[typing.Tuple[str, str], ...]
-_PCTRTTT = typing.Tuple[_PCTRTT, ...]
-_PeerCertRetDictType = typing.Dict[str, typing.Union[str, _PCTRTTT, _PCTRTT]]
-
-
-def format_certificate(cert: _PeerCertRetDictType) -> str: # pragma: no cover
- lines = []
- for key, value in cert.items():
- if isinstance(value, (list, tuple)):
- lines.append(f"* {key}:")
- for item in value:
- if key in ("subject", "issuer"):
- for sub_item in item:
- lines.append(f"* {sub_item[0]}: {sub_item[1]!r}")
- elif isinstance(item, tuple) and len(item) == 2:
- lines.append(f"* {item[0]}: {item[1]!r}")
- else:
- lines.append(f"* {item!r}")
- else:
- lines.append(f"* {key}: {value!r}")
- return "\n".join(lines)
-
-
-def trace(
- name: str, info: typing.Mapping[str, typing.Any], verbose: bool = False
-) -> None:
- console = rich.console.Console()
- if name == "connection.connect_tcp.started" and verbose:
- host = info["host"]
- console.print(f"* Connecting to {host!r}")
- elif name == "connection.connect_tcp.complete" and verbose:
- stream = info["return_value"]
- server_addr = stream.get_extra_info("server_addr")
- console.print(f"* Connected to {server_addr[0]!r} on port {server_addr[1]}")
- elif name == "connection.start_tls.complete" and verbose: # pragma: no cover
- stream = info["return_value"]
- ssl_object = stream.get_extra_info("ssl_object")
- version = ssl_object.version()
- cipher = ssl_object.cipher()
- server_cert = ssl_object.getpeercert()
- alpn = ssl_object.selected_alpn_protocol()
- console.print(f"* SSL established using {version!r} / {cipher[0]!r}")
- console.print(f"* Selected ALPN protocol: {alpn!r}")
- if server_cert:
- console.print("* Server certificate:")
- console.print(format_certificate(server_cert))
- elif name == "http11.send_request_headers.started" and verbose:
- request = info["request"]
- print_request_headers(request, http2=False)
- elif name == "http2.send_request_headers.started" and verbose: # pragma: no cover
- request = info["request"]
- print_request_headers(request, http2=True)
- elif name == "http11.receive_response_headers.complete":
- http_version, status, reason_phrase, headers = info["return_value"]
- print_response_headers(http_version, status, reason_phrase, headers)
- elif name == "http2.receive_response_headers.complete": # pragma: no cover
- status, headers = info["return_value"]
- http_version = b"HTTP/2"
- reason_phrase = None
- print_response_headers(http_version, status, reason_phrase, headers)
-
-
-def download_response(response: Response, download: typing.BinaryIO) -> None:
- console = rich.console.Console()
- console.print()
- content_length = response.headers.get("Content-Length")
- with rich.progress.Progress(
- "[progress.description]{task.description}",
- "[progress.percentage]{task.percentage:>3.0f}%",
- rich.progress.BarColumn(bar_width=None),
- rich.progress.DownloadColumn(),
- rich.progress.TransferSpeedColumn(),
- ) as progress:
- description = f"Downloading [bold]{rich.markup.escape(download.name)}"
- download_task = progress.add_task(
- description,
- total=int(content_length or 0),
- start=content_length is not None,
- )
- for chunk in response.iter_bytes():
- download.write(chunk)
- progress.update(download_task, completed=response.num_bytes_downloaded)
-
-
-def validate_json(
- ctx: click.Context,
- param: typing.Union[click.Option, click.Parameter],
- value: typing.Any,
-) -> typing.Any:
- if value is None:
- return None
-
- try:
- return json.loads(value)
- except json.JSONDecodeError: # pragma: no cover
- raise click.BadParameter("Not valid JSON")
-
-
-def validate_auth(
- ctx: click.Context,
- param: typing.Union[click.Option, click.Parameter],
- value: typing.Any,
-) -> typing.Any:
- if value == (None, None):
- return None
-
- username, password = value
- if password == "-": # pragma: no cover
- password = click.prompt("Password", hide_input=True)
- return (username, password)
-
-
-def handle_help(
- ctx: click.Context,
- param: typing.Union[click.Option, click.Parameter],
- value: typing.Any,
-) -> None:
- if not value or ctx.resilient_parsing:
- return
-
- print_help()
- ctx.exit()
-
-
-@click.command(add_help_option=False)
-@click.argument("url", type=str)
-@click.option(
- "--method",
- "-m",
- "method",
- type=str,
- help=(
- "Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD. "
- "[Default: GET, or POST if a request body is included]"
- ),
-)
-@click.option(
- "--params",
- "-p",
- "params",
- type=(str, str),
- multiple=True,
- help="Query parameters to include in the request URL.",
-)
-@click.option(
- "--content",
- "-c",
- "content",
- type=str,
- help="Byte content to include in the request body.",
-)
-@click.option(
- "--data",
- "-d",
- "data",
- type=(str, str),
- multiple=True,
- help="Form data to include in the request body.",
-)
-@click.option(
- "--files",
- "-f",
- "files",
- type=(str, click.File(mode="rb")),
- multiple=True,
- help="Form files to include in the request body.",
-)
-@click.option(
- "--json",
- "-j",
- "json",
- type=str,
- callback=validate_json,
- help="JSON data to include in the request body.",
-)
-@click.option(
- "--headers",
- "-h",
- "headers",
- type=(str, str),
- multiple=True,
- help="Include additional HTTP headers in the request.",
-)
-@click.option(
- "--cookies",
- "cookies",
- type=(str, str),
- multiple=True,
- help="Cookies to include in the request.",
-)
-@click.option(
- "--auth",
- "auth",
- type=(str, str),
- default=(None, None),
- callback=validate_auth,
- help=(
- "Username and password to include in the request. "
- "Specify '-' for the password to use a password prompt. "
- "Note that using --verbose/-v will expose the Authorization header, "
- "including the password encoding in a trivially reversible format."
- ),
-)
-@click.option(
- "--proxies",
- "proxies",
- type=str,
- default=None,
- help="Send the request via a proxy. Should be the URL giving the proxy address.",
-)
-@click.option(
- "--timeout",
- "timeout",
- type=float,
- default=5.0,
- help=(
- "Timeout value to use for network operations, such as establishing the "
- "connection, reading some data, etc... [Default: 5.0]"
- ),
-)
-@click.option(
- "--follow-redirects",
- "follow_redirects",
- is_flag=True,
- default=False,
- help="Automatically follow redirects.",
-)
-@click.option(
- "--no-verify",
- "verify",
- is_flag=True,
- default=True,
- help="Disable SSL verification.",
-)
-@click.option(
- "--http2",
- "http2",
- type=bool,
- is_flag=True,
- default=False,
- help="Send the request using HTTP/2, if the remote server supports it.",
-)
-@click.option(
- "--download",
- type=click.File("wb"),
- help="Save the response content as a file, rather than displaying it.",
-)
-@click.option(
- "--verbose",
- "-v",
- type=bool,
- is_flag=True,
- default=False,
- help="Verbose. Show request as well as response.",
-)
-@click.option(
- "--help",
- is_flag=True,
- is_eager=True,
- expose_value=False,
- callback=handle_help,
- help="Show this message and exit.",
-)
-def main(
- url: str,
- method: str,
- params: typing.List[typing.Tuple[str, str]],
- content: str,
- data: typing.List[typing.Tuple[str, str]],
- files: typing.List[typing.Tuple[str, click.File]],
- json: str,
- headers: typing.List[typing.Tuple[str, str]],
- cookies: typing.List[typing.Tuple[str, str]],
- auth: typing.Optional[typing.Tuple[str, str]],
- proxies: str,
- timeout: float,
- follow_redirects: bool,
- verify: bool,
- http2: bool,
- download: typing.Optional[typing.BinaryIO],
- verbose: bool,
-) -> None:
- """
- An HTTP command line client.
- Sends a request and displays the response.
- """
- if not method:
- method = "POST" if content or data or files or json else "GET"
-
- try:
- with Client(
- proxies=proxies,
- timeout=timeout,
- verify=verify,
- http2=http2,
- ) as client:
- with client.stream(
- method,
- url,
- params=list(params),
- content=content,
- data=dict(data),
- files=files, # type: ignore
- json=json,
- headers=headers,
- cookies=dict(cookies),
- auth=auth,
- follow_redirects=follow_redirects,
- extensions={"trace": functools.partial(trace, verbose=verbose)},
- ) as response:
- if download is not None:
- download_response(response, download)
- else:
- response.read()
- if response.content:
- print_response(response)
-
- except RequestError as exc:
- console = rich.console.Console()
- console.print(f"[red]{type(exc).__name__}[/red]: {exc}")
- sys.exit(1)
-
- sys.exit(0 if response.is_success else 1)
diff --git a/spaces/DaniilMIPT/greenatomtest/README.md b/spaces/DaniilMIPT/greenatomtest/README.md
deleted file mode 100644
index deec8f5b63b46f393106c8192ec933537dfc2788..0000000000000000000000000000000000000000
--- a/spaces/DaniilMIPT/greenatomtest/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Greenatomtest
-emoji: 🏃
-colorFrom: gray
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/__init__.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/__init__.py
deleted file mode 100644
index 6ea2bf591294feef8e5c6547a05e7ccd9a5a3697..0000000000000000000000000000000000000000
--- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from models.lgt_net import LGT_Net
diff --git a/spaces/DebasishDhal99/Youtube_Playlist/playlist_duration.py b/spaces/DebasishDhal99/Youtube_Playlist/playlist_duration.py
deleted file mode 100644
index 879888a05813a9c7b34d17aa5c043725aa6182fe..0000000000000000000000000000000000000000
--- a/spaces/DebasishDhal99/Youtube_Playlist/playlist_duration.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#import pyyoutube
-from datetime import timedelta
-#from pyyoutube import playlist
-import re
-import gradio as gr
-from urllib.parse import urlparse, parse_qs
-from contextlib import suppress
-import os
-
-api_key = os.getenv("api_key_secret")
-import googleapiclient
-from googleapiclient.discovery import build
-from googleapiclient.errors import HttpError
-import datetime
-youtube = build('youtube', 'v3', developerKey=api_key)
-
-def playlist_duration_func(youtubelink,videoid=False):
-
- def playlist_exist_check(playlistlink):
-
- def extract_playlist_id(playlistlink):
- match = re.search(r'list=([^&]+)', playlistlink) #It searches for the string 'list=' followed by >=1 characters that are not '&'.
- if match:
- return match.group(1)
- return None
-
- playlist_id = extract_playlist_id(playlistlink)
-
- if playlist_id is None:
- return False
-
- search_request = youtube.playlists().list(
-
- part='id',
- id=playlist_id,
- maxResults=1
- )
-
- search_response = search_request.execute()
- if 'items' in search_response:
- try:
- playlistdict = search_response['items'][0]
- print("ID of playlist is:- ",playlistdict['id'])
- return playlistdict['id']
- except:
- #print("Video not found.")
- return False
-
- playlistid = playlist_exist_check(youtubelink)
- if playlistid == False or playlistid==None:
- print("Playlist doesn't exist")
- return False
- print("1st check passed - Playlist link is valid")
-
-
-
-
-#This section retrieves the video ids of all the videos in the playlist, and stores them in a list. 50 in one iteration.
-
- vid_ids = []
- next_page_token = None
- while True:
-
-
- pl_request = youtube.playlistItems().list(
- part="contentDetails,snippet",
- playlistId=playlistid,
- maxResults=50, #This is the max number of videos that can be fetched from a playlist in one go, as YouTube Data API v3 results are paginated
- pageToken=next_page_token
- )
- pl_response = pl_request.execute()
- # print("Reponse obtained from youtube")
-
-
-
- for item in pl_response['items']:
- vid_id = item['contentDetails']['videoId']
- vid_ids.append(vid_id)
- if videoid==True:
- print(item['contentDetails']['videoId'])
-
- next_page_token = pl_response.get("nextPageToken")
- if not next_page_token:
- break
- print("2nd check passed - Playlist read")
-
-
-
-#This section obtains the playlist name from the playlist id
- pl_request = youtube.playlists().list(
- part="snippet",
- id=playlistid,
- maxResults=1
- )
- pl_response = pl_request.execute()
- playlist = pl_response['items'][0]
- title = playlist['snippet']['title']
- print("Playlist Title:", title)
-
-
-
-
-
-
- # title = playlist['snippet']['title']
- # print("Playlist Title:", title)
-#This section retrieves the duration of each video in the playlist, and stores them in a list. 50 in one iteration
-
-
- iterations = len(vid_ids)//50+1
- duration_list = []
- for i in range(iterations):
- start_index = i * 50
- end_index = (i + 1) * 50
- batch_ids = vid_ids[start_index:end_index]
- vid_request = youtube.videos().list(
- part="contentDetails",
- id=','.join(batch_ids)
- )
-
- vid_response = vid_request.execute()
-
-
- for item in vid_response['items']:
- duration = item['contentDetails']['duration']
- duration = duration[2:]
- hours = 0
- minutes = 0
- seconds = 0
-
- if "H" in duration:
- hours_index = duration.index("H")
- hours = int(duration[:hours_index])
- duration = duration[hours_index+1:]
-
- if "M" in duration:
- minutes_index = duration.index("M")
- minutes = int(duration[:minutes_index])
- duration = duration[minutes_index+1:]
-
- if "S" in duration:
- seconds_index = duration.index("S")
- seconds = int(duration[:seconds_index])
-
- duration = timedelta(hours=hours, minutes=minutes, seconds=seconds)
- duration_list.append(duration)
- print("3rd check passed - Individual video duration calculated")
- total_duration = sum(duration_list, timedelta())
- print("Total duration of playlist is:- ",total_duration)
- print("Total no. of videos is = ",len(vid_ids))
- return str(total_duration)
-
diff --git a/spaces/Detomo/Image-Classification/README.md b/spaces/Detomo/Image-Classification/README.md
deleted file mode 100644
index b33ad959c9fbf235409d06ac3dfb211f73608d76..0000000000000000000000000000000000000000
--- a/spaces/Detomo/Image-Classification/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: Universal Image Classification
-emoji: 🤖
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/Dhrushreddy/profile1/app.py b/spaces/Dhrushreddy/profile1/app.py
deleted file mode 100644
index 40da07e54f39ca205c4846a5461ea7d7f0603f8d..0000000000000000000000000000000000000000
--- a/spaces/Dhrushreddy/profile1/app.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import streamlit as st
-st.markdown("
Dhrush
", unsafe_allow_html=True)
-st.markdown("---")
-st.write("Hello Welcome to the new AI world")
-st.write("Hello World")
-st.caption("Hello Python")
\ No newline at end of file
diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/edit/edit_helper.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/edit/edit_helper.py
deleted file mode 100644
index 047e4d29d296306a008f7bb240c18e38e9757500..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/stylegan_human/edit/edit_helper.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-from legacy import save_obj, load_pkl
-import torch
-from torch.nn import functional as F
-import pandas as pd
-from .edit_config import attr_dict
-import os
-
-
-def conv_warper(layer, input, style, noise):
- # the conv should change
- conv = layer.conv
- batch, in_channel, height, width = input.shape
-
- style = style.view(batch, 1, in_channel, 1, 1)
- weight = conv.scale * conv.weight * style
-
- if conv.demodulate:
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
- weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
-
- weight = weight.view(
- batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
- )
-
- if conv.upsample:
- input = input.view(1, batch * in_channel, height, width)
- weight = weight.view(
- batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
- )
- weight = weight.transpose(1, 2).reshape(
- batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size
- )
- out = F.conv_transpose2d(
- input, weight, padding=0, stride=2, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, conv.out_channel, height, width)
- out = conv.blur(out)
-
- elif conv.downsample:
- input = conv.blur(input)
- _, _, height, width = input.shape
- input = input.view(1, batch * in_channel, height, width)
- out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, conv.out_channel, height, width)
-
- else:
- input = input.view(1, batch * in_channel, height, width)
- out = F.conv2d(input, weight, padding=conv.padding, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, conv.out_channel, height, width)
-
- out = layer.noise(out, noise=noise)
- out = layer.activate(out)
-
- return out
-
-
-def decoder(G, style_space, latent, noise):
- # an decoder warper for G
- out = G.input(latent)
- out = conv_warper(G.conv1, out, style_space[0], noise[0])
- skip = G.to_rgb1(out, latent[:, 1])
-
- i = 1
- for conv1, conv2, noise1, noise2, to_rgb in zip(
- G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
- ):
- out = conv_warper(conv1, out, style_space[i], noise=noise1)
- out = conv_warper(conv2, out, style_space[i+1], noise=noise2)
- skip = to_rgb(out, latent[:, i + 2], skip)
- i += 2
- image = skip
-
- return image
-
-
-def encoder_ifg(G, noise, attr_name, truncation=1, truncation_latent=None,
- latent_dir='latent_direction/ss/',
- step=0, total=0, real=False):
- if not real:
- styles = [noise]
- styles = [G.style(s) for s in styles]
- style_space = []
-
- if truncation < 1:
- if not real:
- style_t = []
- for style in styles:
- style_t.append(truncation_latent + truncation *
- (style - truncation_latent))
- styles = style_t
- else: # styles are latent (tensor: 1,18,512), for real PTI output
- truncation_latent = truncation_latent.repeat(
- 18, 1).unsqueeze(0) # (1,512) --> (1,18,512)
- styles = torch.add(truncation_latent, torch.mul(
- torch.sub(noise, truncation_latent), truncation))
-
- noise = [getattr(G.noises, 'noise_{}'.format(i))
- for i in range(G.num_layers)]
- if not real:
- inject_index = G.n_latent
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- else:
- latent = styles
-
- style_space.append(G.conv1.conv.modulation(latent[:, 0]))
- i = 1
- for conv1, conv2, noise1, noise2, to_rgb in zip(
- G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
- ):
- style_space.append(conv1.conv.modulation(latent[:, i]))
- style_space.append(conv2.conv.modulation(latent[:, i+1]))
- i += 2
-
- # get layer, strength by dict
- strength = attr_dict['interface_gan'][attr_name][0]
-
- if step != 0 and total != 0:
- strength = step / total * strength
- for i in range(15):
- style_vect = load_pkl(os.path.join(
- latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, i)))
- style_vect = torch.from_numpy(style_vect).to(latent.device).float()
- style_space[i] += style_vect * strength
-
- return style_space, latent, noise
-
-
-def encoder_ss(G, noise, attr_name, truncation=1, truncation_latent=None,
- statics_dir="latent_direction/ss_statics",
- latent_dir="latent_direction/ss/",
- step=0, total=0, real=False):
- if not real:
- styles = [noise]
- styles = [G.style(s) for s in styles]
- style_space = []
-
- if truncation < 1:
- if not real:
- style_t = []
- for style in styles:
- style_t.append(
- truncation_latent + truncation *
- (style - truncation_latent)
- )
- styles = style_t
- else: # styles are latent (tensor: 1,18,512), for real PTI output
- truncation_latent = truncation_latent.repeat(
- 18, 1).unsqueeze(0) # (1,512) --> (1,18,512)
- styles = torch.add(truncation_latent, torch.mul(
- torch.sub(noise, truncation_latent), truncation))
-
- noise = [getattr(G.noises, 'noise_{}'.format(i))
- for i in range(G.num_layers)]
-
- if not real:
- inject_index = G.n_latent
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- else:
- latent = styles
-
- style_space.append(G.conv1.conv.modulation(latent[:, 0]))
- i = 1
- for conv1, conv2, noise1, noise2, to_rgb in zip(
- G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
- ):
- style_space.append(conv1.conv.modulation(latent[:, i]))
- style_space.append(conv2.conv.modulation(latent[:, i+1]))
- i += 2
- # get threshold, layer, strength by dict
- layer, strength, threshold = attr_dict['stylespace'][attr_name]
-
- statis_dir = os.path.join(
- statics_dir, "{}_statis/{}".format(attr_name, layer))
- statis_csv_path = os.path.join(statis_dir, "statis.csv")
- statis_df = pd.read_csv(statis_csv_path)
- statis_df = statis_df.sort_values(by='channel', ascending=True)
- ch_mask = statis_df['strength'].values
- ch_mask = torch.from_numpy(ch_mask).to(latent.device).float()
- ch_mask = (ch_mask.abs() > threshold).float()
- style_vect = load_pkl(os.path.join(
- latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, layer)))
- style_vect = torch.from_numpy(style_vect).to(latent.device).float()
-
- style_vect = style_vect * ch_mask
-
- if step != 0 and total != 0:
- strength = step / total * strength
-
- style_space[layer] += style_vect * strength
-
- return style_space, latent, noise
-
-
-def encoder_sefa(G, noise, attr_name, truncation=1, truncation_latent=None,
- latent_dir='latent_direction/sefa/',
- step=0, total=0, real=False):
- if not real:
- styles = [noise]
- styles = [G.style(s) for s in styles]
-
- if truncation < 1:
- if not real:
- style_t = []
- for style in styles:
- style_t.append(
- truncation_latent + truncation *
- (style - truncation_latent)
- )
- styles = style_t
- else:
- truncation_latent = truncation_latent.repeat(
- 18, 1).unsqueeze(0) # (1,512) --> (1,18,512)
- styles = torch.add(truncation_latent, torch.mul(
- torch.sub(noise, truncation_latent), truncation))
-
- noise = [getattr(G.noises, 'noise_{}'.format(i))
- for i in range(G.num_layers)]
- if not real:
- inject_index = G.n_latent
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- else:
- latent = styles
-
- layer, strength = attr_dict['sefa'][attr_name]
-
- sefa_vect = torch.load(os.path.join(
- latent_dir, '{}.pt'.format(attr_name))).to(latent.device).float()
- if step != 0 and total != 0:
- strength = step / total * strength
- for l in layer:
- latent[:, l, :] += (sefa_vect * strength * 2)
-
- return latent, noise
diff --git a/spaces/ECCV2022/PSG/OpenPSG/configs/gpsnet/panoptic_fpn_r50_fpn_1x_sgdet_psg.py b/spaces/ECCV2022/PSG/OpenPSG/configs/gpsnet/panoptic_fpn_r50_fpn_1x_sgdet_psg.py
deleted file mode 100644
index 78165a4ce56b57819445d8d58840c6f9fca5f4a8..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/PSG/OpenPSG/configs/gpsnet/panoptic_fpn_r50_fpn_1x_sgdet_psg.py
+++ /dev/null
@@ -1,45 +0,0 @@
-_base_ = [
- '../motifs/panoptic_fpn_r50_fpn_1x_predcls_psg.py',
-]
-
-model = dict(
- relation_head=dict(
- type='GPSHead',
- head_config=dict(
- # NOTE: Evaluation type
- use_gt_box=False,
- use_gt_label=False,
- ),
- ),
- roi_head=dict(bbox_head=dict(type='SceneGraphBBoxHead'), ),
-)
-
-evaluation = dict(
- interval=1,
- metric='sgdet',
- relation_mode=True,
- classwise=True,
- iou_thrs=0.5,
- detection_method='pan_seg',
-)
-
-data = dict(samples_per_gpu=16)
-
-# Log config
-project_name = 'openpsg'
-expt_name = 'gpsnet_panoptic_fpn_r50_fpn_1x_sgdet_psg'
-work_dir = f'./work_dirs/{expt_name}'
-
-log_config = dict(
- interval=50,
- hooks=[
- dict(type='TextLoggerHook'),
- dict(
- type='WandbLoggerHook',
- init_kwargs=dict(
- project=project_name,
- name=expt_name,
- ),
- ),
- ],
-)
diff --git a/spaces/ECCV2022/storydalle/dalle/models/stage2/transformer.py b/spaces/ECCV2022/storydalle/dalle/models/stage2/transformer.py
deleted file mode 100644
index fc74a2992813d65d364b5562e8912398af61135e..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/storydalle/dalle/models/stage2/transformer.py
+++ /dev/null
@@ -1,502 +0,0 @@
-# ------------------------------------------------------------------------------------
-# Minimal DALL-E
-# Copyright (c) 2021 KakaoBrain. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------------------
-# Modified from minGPT (https://github.com/karpathy/minGPT)
-# Copyright (c) 2020 Andrej Karpathy. All Rights Reserved.
-# ------------------------------------------------------------------------------------
-
-import torch
-import torch.nn as nn
-from typing import Optional, Tuple, List
-from torch.cuda.amp import autocast
-from omegaconf import OmegaConf
-from .layers import Block
-
-class Transformer1d(nn.Module):
-
- def __init__(self,
- vocab_size_txt: int,
- vocab_size_img: int,
- hparams: OmegaConf) -> None:
- super().__init__()
- assert hparams.n_layers == hparams.n_dense_layers
-
- # input embedding for image and text
- self.tok_emb_img = nn.Embedding(vocab_size_img, hparams.embed_dim)
- self.tok_emb_txt = nn.Embedding(vocab_size_txt, hparams.embed_dim)
-
- self.pos_emb_img = nn.Embedding(hparams.ctx_len_img, hparams.embed_dim)
- self.pos_emb_txt = nn.Embedding(hparams.ctx_len_txt, hparams.embed_dim)
-
- self.drop = nn.Dropout(hparams.embd_pdrop)
-
- # transformer blocks
- self.blocks = [Block(ctx_len=hparams.ctx_len_img + hparams.ctx_len_txt,
- embed_dim=hparams.embed_dim,
- n_heads=hparams.n_heads,
- mlp_bias=hparams.mlp_bias,
- attn_bias=hparams.attn_bias,
- resid_pdrop=hparams.resid_pdrop,
- attn_pdrop=hparams.attn_pdrop,
- gelu_use_approx=hparams.gelu_use_approx) for i in range(1, hparams.n_layers+1)]
- self.blocks = nn.Sequential(*self.blocks)
-
- # heads for image and text
- self.ln_f = nn.LayerNorm(hparams.embed_dim)
- self.head_img = nn.Linear(hparams.embed_dim, vocab_size_img, bias=False)
- self.head_txt = nn.Linear(hparams.embed_dim, vocab_size_txt, bias=False)
-
- self.ctx_len_img = hparams.ctx_len_img
- self.ctx_len_txt = hparams.ctx_len_txt
- self.n_layers = hparams.n_layers
-
- self.apply(self._init_weights)
-
-
- def _init_weights(self, module: nn.Module) -> None:
- if isinstance(module, (nn.Linear, nn.Embedding)):
- module.weight.data.normal_(mean=0.0, std=0.02)
- if isinstance(module, nn.Linear) and module.bias is not None:
- module.bias.data.zero_()
- elif isinstance(module, nn.LayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
-
-
- def resize_token_embeddings(self, new_num_tokens):
-
- old_num_tokens, old_embedding_dim = self.tok_emb_txt.weight.size()
- new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
- new_embeddings.to(self.tok_emb_txt.weight.device, dtype=self.tok_emb_txt.weight.dtype)
- self._init_weights(new_embeddings)
- # numbers of tokens to copy
- n = min(old_num_tokens, new_num_tokens)
- new_embeddings.weight.data[:n, :] = self.tok_emb_txt.weight.data[:n, :]
- self.tok_emb_txt = new_embeddings
-
- self.resize_lm_head(new_num_tokens)
- # TODO: also change config to reflect new vocab size
-
- return new_embeddings
-
-
- def resize_lm_head(
- self, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False) -> nn.Linear:
-
- old_num_tokens, old_lm_head_dim = (
- self.head_txt.weight.size() if not transposed else self.head_txt.weight.t().size()
- )
- # Build new lm head
- new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
- has_new_lm_head_bias = self.head_txt.bias is not None
- new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias)
- new_lm_head = new_lm_head.to(self.head_txt.weight.device, dtype=self.head_txt.weight.dtype)
-
- # initialize new lm head (in particular added tokens)
- self._init_weights(new_lm_head)
- num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
- # Copy old lm head weights to new lm head
- if not transposed:
- new_lm_head.weight.data[:num_tokens_to_copy, :] = self.head_txt.weight.data[:num_tokens_to_copy, :]
- else:
- new_lm_head.weight.data[:, :num_tokens_to_copy] = self.head_txt.weight.data[:, :num_tokens_to_copy]
-
- # Copy bias weights to new lm head
- if has_new_lm_head_bias:
- new_lm_head.bias.data[:num_tokens_to_copy] = self.head_txt.bias.data[:num_tokens_to_copy]
-
- self.head_txt = new_lm_head
-
- return new_lm_head
-
-
- def forward(self,
- images: torch.LongTensor,
- texts: torch.LongTensor,
- pos_images: torch.LongTensor,
- pos_texts: torch.LongTensor,
- past: Optional[List[torch.Tensor]] = None,
- prompt: Optional[List[torch.Tensor]] = None,
- pos_prompt: Optional[List[torch.Tensor]] = None) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
-
-
- B, T = images.shape
- _, N = texts.shape
-
- assert T <= self.ctx_len_img, "Already reached the maximum context length (image)."
- assert N == self.ctx_len_txt, "Already reached the maximum context length (text)."
-
- texts = self.tok_emb_txt(texts)
- images = self.tok_emb_img(images)
-
- texts = texts + self.pos_emb_txt(pos_texts)
- images = images + self.pos_emb_img(pos_images)
-
- if prompt is not None:
- prompt = prompt + self.pos_emb_txt(pos_prompt)
- texts = torch.cat([prompt, texts], dim=1).contiguous()
- P = prompt.shape[1]
-
- x = torch.cat([texts, images], dim=1).contiguous()
- x = self.drop(x)
-
- # x = self.blocks(x)
- for i, block in enumerate(self.blocks):
- x, _ = block.sample(x, layer_past=None if past is None else past[i])
-
- x = self.ln_f(x)
-
- if prompt is not None:
- texts = x[:, P:N+P-1].contiguous()
- images = x[:, N+P-1:-1].contiguous()
- else:
- texts = x[:, :N-1].contiguous()
- images = x[:, N-1:-1].contiguous()
-
- logits_txt = self.head_txt(texts)
- logits_img = self.head_img(images)
- return logits_img, logits_txt
-
- def forward_with_context(self,
- images: torch.LongTensor,
- texts: torch.LongTensor,
- pos_images: torch.LongTensor,
- pos_texts: torch.LongTensor,
- src_images: torch.LongTensor,
- src_pos_images: torch.LongTensor,
- cross_attention_idxs: List,
- cross_attention_layers,
- past: Optional[List[torch.Tensor]] = None,
- prompt: Optional[List[torch.Tensor]] = None,
- pos_prompt: Optional[List[torch.Tensor]] = None) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
-
-
- B, T = images.shape
- _, N = texts.shape
-
- assert T <= self.ctx_len_img, "Already reached the maximum context length (image)."
- assert N == self.ctx_len_txt, "Already reached the maximum context length (text)."
-
- texts = self.tok_emb_txt(texts)
- images = self.tok_emb_img(images)
- src_images = self.tok_emb_img(src_images)
-
- texts = texts + self.pos_emb_txt(pos_texts)
- images = images + self.pos_emb_img(pos_images)
- src_images = src_images + self.pos_emb_img(src_pos_images)
-
- if prompt is not None:
- prompt = prompt + self.pos_emb_txt(pos_prompt)
- texts = torch.cat([prompt, texts], dim=1).contiguous()
- P = prompt.shape[1]
- else:
- P = 0
-
- x = torch.cat([texts, images], axis=1).contiguous()
- x = self.drop(x)
-
- # prepare mask
- mask = torch.zeros_like(x[0])
- mask[self.ctx_len_txt+P-1:, :].fill_(1.0)
- mask = mask.unsqueeze(0)
-
- # print(images.shape, texts.shape, src_images.shape, mask.shape, x.shape)
-
- # x = self.blocks(x)
- for i, block in enumerate(self.blocks):
- if i in cross_attention_idxs:
- x, _ = block.sample_with_context(x, src_images, mask, cross_attention_layers[int(((i+1)/3)-1)], layer_past=None if past is None else past[i])
- else:
- x, _ = block.sample(x, layer_past=None if past is None else past[i])
-
- x = self.ln_f(x)
-
- if prompt is not None:
- texts = x[:, P:N+P-1].contiguous()
- images = x[:, N+P-1:-1].contiguous()
- else:
- texts = x[:, :N-1].contiguous()
- images = x[:, N-1:-1].contiguous()
-
- logits_txt = self.head_txt(texts)
- logits_img = self.head_img(images)
- return logits_img, logits_txt
-
- @torch.no_grad()
- def sampling(self,
- images: torch.LongTensor,
- texts: torch.LongTensor,
- pos_images: torch.LongTensor,
- pos_texts: torch.LongTensor,
- use_fp16: bool = True,
- past: Optional[List[torch.Tensor]] = None,
- prompt: Optional[List[torch.Tensor]] = None,
- pos_prompt: Optional[List[torch.Tensor]] = None) -> Tuple[torch.FloatTensor, List[torch.FloatTensor]]:
-
- _, N = texts.shape
- assert N == self.ctx_len_txt, "Already reached the maximum context length (text)."
-
- with autocast(enabled=use_fp16):
- if images is None:
- # assert past is None
-
- texts = self.tok_emb_txt(texts)
-                texts = texts + self.pos_emb_txt(pos_texts)
-
- if prompt is not None:
- prompt = prompt + self.pos_emb_txt(pos_prompt)
- texts = torch.cat([prompt, texts], dim=1).contiguous()
-
-                x = self.drop(texts)  # texts now includes the prompt tokens, matching sampling_with_context
-
- if past is not None:
- past = torch.cat(past, dim=-2)
-
- presents = []
- for i, block in enumerate(self.blocks):
- x, present = block.sample(x, layer_past=None if past is None else past[i])
- presents.append(present)
- x = self.ln_f(x)
- x = x[:, N-1].contiguous()
- logits = self.head_img(x)
- else:
- if past is None:
- texts = self.tok_emb_txt(texts)
- images = self.tok_emb_img(images)
- texts = texts + self.pos_emb_txt(pos_texts)
- images = images + self.pos_emb_img(pos_images)
-
- if prompt is not None:
- prompt = prompt + self.pos_emb_txt(pos_prompt)
- texts = torch.cat([prompt, texts], dim=1).contiguous()
-
- x = torch.cat([texts, images], axis=1).contiguous()
- else:
- images = self.tok_emb_img(images)
- x = images + self.pos_emb_img(pos_images)
- x = self.drop(x)
-
- # if past is not None and len(past) > 1:
- if past is not None:
- past = torch.cat(past, dim=-2)
- # print('Past', past.shape)
- presents = []
- # print(len(past), past[0].shape)
- for i, block in enumerate(self.blocks):
- x, present = block.sample(x, layer_past=None if past is None else past[i])
- presents.append(present)
- x = self.ln_f(x)
- x = x[:, -1].contiguous()
- logits = self.head_img(x)
- return logits, presents
-
- @torch.no_grad()
- def sampling_with_context(self,
- images: torch.LongTensor,
- cross_attention_idxs,
- cross_attention_layers,
- texts: torch.LongTensor,
- pos_images: torch.LongTensor,
- pos_texts: torch.LongTensor,
- source_image: torch.LongTensor,
- use_fp16: bool = True,
- past: Optional[List[torch.Tensor]] = None,
- prompt: Optional[List[torch.Tensor]] = None,
- pos_prompt: Optional[List[torch.Tensor]] = None
- ) -> Tuple[torch.FloatTensor, List[torch.FloatTensor]]:
-
- _, N = texts.shape
- assert N == self.ctx_len_txt, "Already reached the maximum context length (text)."
-
- if prompt is not None:
- P = prompt.shape[1]
- else:
- P = 0
-
- with autocast(enabled=use_fp16):
- if images is None:
- # assert past is None
-
- texts = self.tok_emb_txt(texts)
- texts = texts + self.pos_emb_txt(pos_texts)
-
- if prompt is not None:
- prompt = prompt + self.pos_emb_txt(pos_prompt)
- texts = torch.cat([prompt, texts], dim=1).contiguous()
-
- x = self.drop(texts)
-
- if past is not None:
- past = torch.cat(past, dim=-2)
-
- # prepare mask
- mask = torch.zeros_like(x[0])
- mask[self.ctx_len_txt+P - 1:, :].fill_(1.0)
- mask = mask.unsqueeze(0)
-
- presents = []
- for i, block in enumerate(self.blocks):
- if i in cross_attention_idxs:
- x, present = block.sample_with_context(x, source_image, mask,
- cross_attention_layers[int(((i + 1) / 3) - 1)],
- layer_past=None if past is None else past[i])
- else:
- x, present = block.sample(x, layer_past=None if past is None else past[i])
- presents.append(present)
- x = self.ln_f(x)
- x = x[:, N-1].contiguous()
- logits = self.head_img(x)
- else:
- if past is None:
- texts = self.tok_emb_txt(texts)
- images = self.tok_emb_img(images)
- texts = texts + self.pos_emb_txt(pos_texts)
- images = images + self.pos_emb_img(pos_images)
-
- if prompt is not None:
- prompt = prompt + self.pos_emb_txt(pos_prompt)
- texts = torch.cat([prompt, texts], dim=1).contiguous()
-
- x = torch.cat([texts, images], axis=1).contiguous()
- else:
- images = self.tok_emb_img(images)
- x = images + self.pos_emb_img(pos_images)
- x = self.drop(x)
-
- # if past is not None and len(past) > 1:
- if past is not None:
- past = torch.cat(past, dim=-2)
- presents = []
-
- # prepare mask
- mask = torch.zeros_like(x[0])
- mask[self.ctx_len_txt+P - 1:, :].fill_(1.0)
- mask = mask.unsqueeze(0)
-
- # print(len(past), past[0].shape)
- for i, block in enumerate(self.blocks):
- if i in cross_attention_idxs:
- x, present = block.sample_with_context(x, source_image, mask,
- cross_attention_layers[int(((i + 1) / 3) - 1)],
- layer_past=None if past is None else past[i])
- else:
- x, present = block.sample(x, layer_past=None if past is None else past[i])
- presents.append(present)
- x = self.ln_f(x)
- x = x[:, -1].contiguous()
- logits = self.head_img(x)
- return logits, presents
-
- def from_ckpt(self, path: str) -> None:
- ckpt = torch.load(path, map_location='cpu')['state_dict']
- self.load_state_dict(ckpt, strict=True)
-        print(f'{path} successfully restored..')
-
-
-class iGPT(nn.Module):
- def __init__(self,
- vocab_size_img: int,
- use_cls_cond: bool,
- hparams: OmegaConf) -> None:
- super().__init__()
- self.use_cls_cond = use_cls_cond
-
- # sos token embedding
- if self.use_cls_cond:
- self.sos = nn.Embedding(hparams.n_classes, hparams.embed_dim)
- else:
- self.sos = nn.Parameter(torch.randn(1, 1, hparams.embed_dim))
-
- # input embedding
- self.tok_emb_img = nn.Embedding(vocab_size_img, hparams.embed_dim)
- self.pos_emb_img = nn.Embedding(hparams.ctx_len_img, hparams.embed_dim)
-
- self.drop = nn.Dropout(hparams.embd_pdrop)
-
- # transformer blocks
- self.blocks = [Block(ctx_len=hparams.ctx_len_img + 1,
- embed_dim=hparams.embed_dim,
- n_heads=hparams.n_heads,
- mlp_bias=hparams.mlp_bias,
- attn_bias=hparams.attn_bias,
- resid_pdrop=hparams.resid_pdrop,
- attn_pdrop=hparams.attn_pdrop,
- gelu_use_approx=hparams.gelu_use_approx) for i in range(1, hparams.n_layers+1)]
- self.blocks = nn.Sequential(*self.blocks)
-
- # head
- self.ln_f = nn.LayerNorm(hparams.embed_dim)
- self.head = nn.Linear(hparams.embed_dim, vocab_size_img, bias=False)
-
- self.ctx_len_img = hparams.ctx_len_img
- self.n_layers = hparams.n_layers
-
- self.apply(self._init_weights)
-
- def _init_weights(self, module: nn.Module) -> None:
- if isinstance(module, (nn.Linear, nn.Embedding)):
- module.weight.data.normal_(mean=0.0, std=0.02)
- if isinstance(module, nn.Linear) and module.bias is not None:
- module.bias.data.zero_()
- elif isinstance(module, nn.LayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
-
- @torch.no_grad()
- def sampling(self,
- sos: torch.FloatTensor,
- codes: torch.LongTensor,
- pos_codes: torch.LongTensor,
- n_samples: int = 16,
- use_fp16: bool = True,
- past: Optional[torch.Tensor] = None) -> Tuple[torch.FloatTensor, List[torch.FloatTensor]]:
- with autocast(enabled=use_fp16):
- if codes is None:
- assert past is None
- xs = self.drop(sos)
- presents = []
- for i, block in enumerate(self.blocks):
- xs, present = block.sample(xs, layer_past=None)
- presents.append(present)
- xs = self.ln_f(xs)
- logits = self.head(xs)[:, -1]
- else:
- if past is None:
- xs = self.tok_emb_img(codes) + self.pos_emb_img(pos_codes)
- xs = torch.cat([sos, xs], dim=1)
- else:
- xs = self.tok_emb_img(codes) + self.pos_emb_img(pos_codes)
- xs = self.drop(xs)
-
- past = torch.cat(past, dim=-2) if past is not None else past
- presents = []
- for i, block in enumerate(self.blocks):
- xs, present = block.sample(xs, layer_past=None if past is None else past[i])
- presents.append(present)
-
- xs = self.ln_f(xs)
- logits = self.head(xs)[:, -1]
- return logits, presents
-
- def forward(self,
- codes: torch.LongTensor,
- labels: Optional[torch.LongTensor] = None) -> torch.FloatTensor:
- B, T = codes.shape
- xps = torch.arange(T, device=codes.device).repeat((B, 1))
- sos = self.sos.repeat((B, 1, 1)) if labels is None else self.sos(labels).unsqueeze(1)
-
- h = self.tok_emb_img(codes) + self.pos_emb_img(xps)
- h = torch.cat([sos, h[:, :-1]], dim=1).contiguous()
-
- h = self.drop(h)
- h = self.blocks(h)
- h = self.ln_f(h)
- logits = self.head(h)
- return logits
-
- def from_ckpt(self, path: str, strict: bool = True) -> None:
- ckpt = torch.load(path, map_location='cpu')['state_dict']
- self.load_state_dict(ckpt, strict=strict)
- print(f'{path} successfully restored..')
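
The `resize_token_embeddings` / `resize_lm_head` pair above grows the text vocabulary by building a freshly initialized table and copying the first `min(old, new)` rows over, so existing tokens keep their weights and newly added tokens start from the normal(0, 0.02) init. Below is a minimal, self-contained sketch of that copy pattern as a standalone helper (not the class above):

```py
import torch
import torch.nn as nn

def resize_embedding(old_emb: nn.Embedding, new_num_tokens: int) -> nn.Embedding:
    old_num_tokens, embed_dim = old_emb.weight.shape
    new_emb = nn.Embedding(new_num_tokens, embed_dim)
    new_emb.to(old_emb.weight.device, dtype=old_emb.weight.dtype)
    new_emb.weight.data.normal_(mean=0.0, std=0.02)    # same init as _init_weights above
    n = min(old_num_tokens, new_num_tokens)            # rows shared by both vocabularies
    new_emb.weight.data[:n, :] = old_emb.weight.data[:n, :]
    return new_emb

old = nn.Embedding(10, 4)
new = resize_embedding(old, 12)                        # two extra, freshly initialized rows
assert torch.equal(new.weight[:10], old.weight)
print(new.weight.shape)                                # torch.Size([12, 4])
```
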
diff --git a/spaces/Eddycrack864/Applio-Inference/train/process_ckpt.py b/spaces/Eddycrack864/Applio-Inference/train/process_ckpt.py
deleted file mode 100644
index e3c3dba6df4b4f71a4d0865cdc96241d17da8781..0000000000000000000000000000000000000000
--- a/spaces/Eddycrack864/Applio-Inference/train/process_ckpt.py
+++ /dev/null
@@ -1,259 +0,0 @@
-import torch, traceback, os, pdb, sys
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-from collections import OrderedDict
-from i18n import I18nAuto
-
-i18n = I18nAuto()
-
-
-def savee(ckpt, sr, if_f0, name, epoch, version, hps):
- try:
- opt = OrderedDict()
- opt["weight"] = {}
- for key in ckpt.keys():
- if "enc_q" in key:
- continue
- opt["weight"][key] = ckpt[key].half()
- opt["config"] = [
- hps.data.filter_length // 2 + 1,
- 32,
- hps.model.inter_channels,
- hps.model.hidden_channels,
- hps.model.filter_channels,
- hps.model.n_heads,
- hps.model.n_layers,
- hps.model.kernel_size,
- hps.model.p_dropout,
- hps.model.resblock,
- hps.model.resblock_kernel_sizes,
- hps.model.resblock_dilation_sizes,
- hps.model.upsample_rates,
- hps.model.upsample_initial_channel,
- hps.model.upsample_kernel_sizes,
- hps.model.spk_embed_dim,
- hps.model.gin_channels,
- hps.data.sampling_rate,
- ]
- opt["info"] = "%sepoch" % epoch
- opt["sr"] = sr
- opt["f0"] = if_f0
- opt["version"] = version
- torch.save(opt, "weights/%s.pth" % name)
- return "Success."
- except:
- return traceback.format_exc()
-
-
-def show_info(path):
- try:
- a = torch.load(path, map_location="cpu")
- return "Epochs: %s\nSample rate: %s\nPitch guidance: %s\nRVC Version: %s" % (
- a.get("info", "None"),
- a.get("sr", "None"),
- a.get("f0", "None"),
- a.get("version", "None"),
- )
- except:
- return traceback.format_exc()
-
-
-def extract_small_model(path, name, sr, if_f0, info, version):
- try:
- ckpt = torch.load(path, map_location="cpu")
- if "model" in ckpt:
- ckpt = ckpt["model"]
- opt = OrderedDict()
- opt["weight"] = {}
- for key in ckpt.keys():
- if "enc_q" in key:
- continue
- opt["weight"][key] = ckpt[key].half()
- if sr == "40k":
- opt["config"] = [
- 1025,
- 32,
- 192,
- 192,
- 768,
- 2,
- 6,
- 3,
- 0,
- "1",
- [3, 7, 11],
- [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
- [10, 10, 2, 2],
- 512,
- [16, 16, 4, 4],
- 109,
- 256,
- 40000,
- ]
- elif sr == "48k":
- if version == "v1":
- opt["config"] = [
- 1025,
- 32,
- 192,
- 192,
- 768,
- 2,
- 6,
- 3,
- 0,
- "1",
- [3, 7, 11],
- [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
- [10, 6, 2, 2, 2],
- 512,
- [16, 16, 4, 4, 4],
- 109,
- 256,
- 48000,
- ]
- else:
- opt["config"] = [
- 1025,
- 32,
- 192,
- 192,
- 768,
- 2,
- 6,
- 3,
- 0,
- "1",
- [3, 7, 11],
- [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
- [12, 10, 2, 2],
- 512,
- [24, 20, 4, 4],
- 109,
- 256,
- 48000,
- ]
- elif sr == "32k":
- if version == "v1":
- opt["config"] = [
- 513,
- 32,
- 192,
- 192,
- 768,
- 2,
- 6,
- 3,
- 0,
- "1",
- [3, 7, 11],
- [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
- [10, 4, 2, 2, 2],
- 512,
- [16, 16, 4, 4, 4],
- 109,
- 256,
- 32000,
- ]
- else:
- opt["config"] = [
- 513,
- 32,
- 192,
- 192,
- 768,
- 2,
- 6,
- 3,
- 0,
- "1",
- [3, 7, 11],
- [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
- [10, 8, 2, 2],
- 512,
- [20, 16, 4, 4],
- 109,
- 256,
- 32000,
- ]
- if info == "":
- info = "Extracted model."
- opt["info"] = info
- opt["version"] = version
- opt["sr"] = sr
- opt["f0"] = int(if_f0)
- torch.save(opt, "weights/%s.pth" % name)
- return "Success."
- except:
- return traceback.format_exc()
-
-
-def change_info(path, info, name):
- try:
- ckpt = torch.load(path, map_location="cpu")
- ckpt["info"] = info
- if name == "":
- name = os.path.basename(path)
- torch.save(ckpt, "weights/%s" % name)
- return "Success."
- except:
- return traceback.format_exc()
-
-
-def merge(path1, path2, alpha1, sr, f0, info, name, version):
- try:
-
- def extract(ckpt):
- a = ckpt["model"]
- opt = OrderedDict()
- opt["weight"] = {}
- for key in a.keys():
- if "enc_q" in key:
- continue
- opt["weight"][key] = a[key]
- return opt
-
- ckpt1 = torch.load(path1, map_location="cpu")
- ckpt2 = torch.load(path2, map_location="cpu")
- cfg = ckpt1["config"]
- if "model" in ckpt1:
- ckpt1 = extract(ckpt1)
- else:
- ckpt1 = ckpt1["weight"]
- if "model" in ckpt2:
- ckpt2 = extract(ckpt2)
- else:
- ckpt2 = ckpt2["weight"]
- if sorted(list(ckpt1.keys())) != sorted(list(ckpt2.keys())):
- return "Fail to merge the models. The model architectures are not the same."
- opt = OrderedDict()
- opt["weight"] = {}
- for key in ckpt1.keys():
- # try:
- if key == "emb_g.weight" and ckpt1[key].shape != ckpt2[key].shape:
- min_shape0 = min(ckpt1[key].shape[0], ckpt2[key].shape[0])
- opt["weight"][key] = (
- alpha1 * (ckpt1[key][:min_shape0].float())
- + (1 - alpha1) * (ckpt2[key][:min_shape0].float())
- ).half()
- else:
- opt["weight"][key] = (
- alpha1 * (ckpt1[key].float()) + (1 - alpha1) * (ckpt2[key].float())
- ).half()
- # except:
- # pdb.set_trace()
- opt["config"] = cfg
- """
- if(sr=="40k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 10, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 40000]
- elif(sr=="48k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,6,2,2,2], 512, [16, 16, 4, 4], 109, 256, 48000]
- elif(sr=="32k"):opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 4, 2, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 32000]
- """
- opt["sr"] = sr
- opt["f0"] = 1 if f0 else 0
- opt["version"] = version
- opt["info"] = info
- torch.save(opt, "weights/%s.pth" % name)
- return "Success."
- except:
- return traceback.format_exc()
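
The `merge` routine above is a straight linear interpolation of two checkpoints, `alpha1 * w1 + (1 - alpha1) * w2`, computed in fp32 and stored back as fp16 (with a shape-truncating special case for `emb_g.weight`). A small sketch of that blend on bare state dicts, with toy tensors standing in for real weights:

```py
import torch
from collections import OrderedDict

def blend_state_dicts(sd1, sd2, alpha):
    # Assumes both dicts share the same keys, as merge() above checks first.
    out = OrderedDict()
    for key in sd1:
        out[key] = (alpha * sd1[key].float() + (1 - alpha) * sd2[key].float()).half()
    return out

sd_a = {"layer.weight": torch.zeros(2, 3)}
sd_b = {"layer.weight": torch.ones(2, 3)}
merged = blend_state_dicts(sd_a, sd_b, alpha=0.25)
print(merged["layer.weight"])   # every entry is 0.75, stored as float16
```
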
diff --git a/spaces/EuroPython2022/YOLOv5/app.py b/spaces/EuroPython2022/YOLOv5/app.py
deleted file mode 100644
index c97ae77188aaec082b992e5010cd285f189e2ff3..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/YOLOv5/app.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import gradio as gr
-import torch
-from PIL import Image
-
-# Images
-torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
-torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg', 'bus.jpg')
-
-# Model
-model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # force_reload=True to update
-
-
-def yolo(im, size=640):
- g = (size / max(im.size)) # gain
- im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS) # resize
-
- results = model(im) # inference
- results.render() # updates results.imgs with boxes and labels
- return Image.fromarray(results.imgs[0])
-
-
-inputs = gr.inputs.Image(type='pil', label="Original Image")
-outputs = gr.outputs.Image(type="pil", label="Output Image")
-
-title = "YOLOv5"
-description = "## Gradio demo for [ultralytics/yolov5](https://github.com/ultralytics/yolov5)"
-article = "YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. Source code | PyTorch Hub"
-
-examples = [['zidane.jpg'], ['bus.jpg']]
-gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
\ No newline at end of file
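
The resize step in `yolo()` above scales the image so its longer side equals `size` before inference. Note that `Image.ANTIALIAS` was removed in Pillow 10, so a current sketch of the same computation would use `Image.LANCZOS`:

```py
from PIL import Image

def resize_longer_side(im: Image.Image, size: int = 640) -> Image.Image:
    g = size / max(im.size)   # gain so the longer edge becomes `size`
    return im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)

im = Image.new("RGB", (1280, 720))
print(resize_longer_side(im).size)   # (640, 360)
```
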
diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_block_shifting.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_block_shifting.py
deleted file mode 100644
index 8b853f4fb4993f3fb12f5da5d0f24cc3c2964f3f..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_block_shifting.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import numpy as np
-import os
-import pybullet as p
-import random
-from cliport.tasks import primitives
-from cliport.tasks.grippers import Spatula
-from cliport.tasks.task import Task
-from cliport.utils import utils
-
-class ColorCoordinatedBlockShifting(Task):
- """Pick up each block and precisely place it in the zone of the same color."""
-
- def __init__(self):
- super().__init__()
- self.max_steps = 15
- self.lang_template = "move the {color} blocks to the {color} zone"
- self.task_completed_desc = "done moving blocks."
- self.additional_reset()
-
- def reset(self, env):
- super().reset(env)
-
- # Add zones.
- zone_size = (0.12, 0.12, 0)
- zone_urdf = 'zone/zone.urdf'
- zone_colors = ['yellow', 'blue', 'green']
- zone_poses = []
- for color in zone_colors:
- zone_pose = self.get_random_pose(env, zone_size)
- env.add_object(zone_urdf, zone_pose, 'fixed', color=utils.COLORS[color])
- zone_poses.append(zone_pose)
-
- # Add blocks.
- block_size = (0.04, 0.04, 0.04)
- block_urdf = 'stacking/block.urdf'
- blocks = []
- for color in zone_colors:
- for _ in range(3):
- block_pose = self.get_random_pose(env, block_size)
- block_id = env.add_object(block_urdf, block_pose, color=utils.COLORS[color])
- blocks.append(block_id)
-
- # Add small blocks as obstacles.
- small_block_size = (0.02, 0.02, 0.02)
- small_block_urdf = 'stacking/block.urdf'
- for _ in range(5):
- small_block_pose = self.get_random_pose(env, small_block_size)
- env.add_object(small_block_urdf, small_block_pose)
-
- # Goal: each block is in the zone of the same color.
- for i in range(9):
- self.add_goal(objs=[blocks[i]], matches=np.ones((1, 1)), targ_poses=[zone_poses[i//3]], replace=False,
- rotations=True, metric='pose', params=None, step_max_reward=1/9,
- language_goal=self.lang_template.format(color=zone_colors[i//3]))
\ No newline at end of file
diff --git a/spaces/Gen-Sim/Gen-Sim/misc/copy_all_videos.py b/spaces/Gen-Sim/Gen-Sim/misc/copy_all_videos.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/GeorgeOrville/bingo/src/lib/utils.ts b/spaces/GeorgeOrville/bingo/src/lib/utils.ts
deleted file mode 100644
index 0a09ddc4aa5518f681a00a64ad48566516f35417..0000000000000000000000000000000000000000
--- a/spaces/GeorgeOrville/bingo/src/lib/utils.ts
+++ /dev/null
@@ -1,158 +0,0 @@
-import { clsx, type ClassValue } from 'clsx'
-import { customAlphabet } from 'nanoid'
-import { twMerge } from 'tailwind-merge'
-
-export function cn(...inputs: ClassValue[]) {
- return twMerge(clsx(inputs))
-}
-
-export const nanoid = customAlphabet(
- '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
- 7
-) // 7-character random string
-
-export function createChunkDecoder() {
- const decoder = new TextDecoder()
- return function (chunk: Uint8Array | undefined): string {
- if (!chunk) return ''
- return decoder.decode(chunk, { stream: true })
- }
-}
-
-export function random (start: number, end: number) {
- return start + Math.ceil(Math.random() * (end - start))
-}
-
-export function randomIP() {
- return `11.${random(104, 107)}.${random(1, 255)}.${random(1, 255)}`
-}
-
-export const defaultUID = Math.random().toString(36).slice(2)
-
-export function parseHeadersFromCurl(content: string) {
- const re = /-H '([^:]+):\s*([^']+)/mg
- const headers: HeadersInit = {}
-  content = content.replaceAll('-H "', '-H \'').replaceAll('" ^', '\'\\').replaceAll('^\\^"', '"') // convert a cmd-style curl command into a bash-style one
- content.replace(re, (_: string, key: string, value: string) => {
- headers[key] = value
- return ''
- })
-
- return headers
-}
-
-export const ChunkKeys = ['BING_HEADER', 'BING_HEADER1', 'BING_HEADER2']
-export function encodeHeadersToCookie(content: string) {
- const base64Content = btoa(content)
- const contentChunks = base64Content.match(/.{1,4000}/g) || []
- return ChunkKeys.map((key, index) => `${key}=${contentChunks[index] ?? ''}`)
-}
-
-export function extraCurlFromCookie(cookies: Partial<{ [key: string]: string }>) {
- let base64Content = ''
- ChunkKeys.forEach((key) => {
- base64Content += (cookies[key] || '')
- })
- try {
- return atob(base64Content)
- } catch(e) {
- return ''
- }
-}
-
-export function extraHeadersFromCookie(cookies: Partial<{ [key: string]: string }>) {
- return parseHeadersFromCurl(extraCurlFromCookie(cookies))
-}
-
-export function formatDate(input: string | number | Date): string {
- const date = new Date(input)
- return date.toLocaleDateString('en-US', {
- month: 'long',
- day: 'numeric',
- year: 'numeric'
- })
-}
-
-export function parseCookie(cookie: string, cookieName: string) {
- const targetCookie = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`).test(cookie) ? RegExp.$1 : cookie
- return targetCookie ? decodeURIComponent(targetCookie).trim() : cookie.indexOf('=') === -1 ? cookie.trim() : ''
-}
-
-export function setCookie(key: string, value: string) {
- const maxAge = 86400 * 30
- document.cookie = `${key}=${value || ''}; Path=/; Max-Age=${maxAge}; SameSite=None; Secure`
-}
-
-export function getCookie(cookieName: string) {
- const re = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`)
- return re.test(document.cookie) ? RegExp.$1 : ''
-}
-
-export function parseCookies(cookie: string, cookieNames: string[]) {
- const cookies: { [key: string]: string } = {}
- cookieNames.forEach(cookieName => {
- cookies[cookieName] = parseCookie(cookie, cookieName)
- })
- return cookies
-}
-
-export const DEFAULT_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.0.0'
-export const DEFAULT_IP = process.env.BING_IP || randomIP()
-
-export function parseUA(ua?: string, default_ua = DEFAULT_UA) {
- return / EDGE?/i.test(decodeURIComponent(ua || '')) ? decodeURIComponent(ua!.trim()) : default_ua
-}
-
-export function createHeaders(cookies: Partial<{ [key: string]: string }>, defaultHeaders?: Partial<{ [key: string]: string }>, type?: string) {
- let {
- BING_COOKIE = process.env.BING_COOKIE,
- BING_UA = process.env.BING_UA,
- BING_IP = process.env.BING_IP,
- BING_HEADER = process.env.BING_HEADER,
- IMAGE_ONLY = process.env.IMAGE_ONLY ?? '1',
- } = cookies
-
- if (BING_HEADER) {
- const headers = extraHeadersFromCookie({
- BING_HEADER,
- ...cookies,
- }) || {}
- if (/^(1|true|yes)$/.test(String(IMAGE_ONLY)) && type !== 'image') {
-      // only keep the real cookie for image generation
- headers.cookie = `_U=${defaultUID}`
- }
- if (headers['user-agent']) {
- return headers
- }
- }
-
- const ua = parseUA(BING_UA)
-
- if (!BING_COOKIE) {
-    BING_COOKIE = defaultHeaders?.IMAGE_BING_COOKIE || defaultUID // on Hugging Face it currently works without a cookie
- }
-
- const parsedCookie = parseCookie(BING_COOKIE, '_U')
- if (!parsedCookie) {
- throw new Error('Invalid Cookie')
- }
- return {
- 'x-forwarded-for': BING_IP || DEFAULT_IP,
- 'Accept-Encoding': 'gzip, deflate, br',
- 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
- 'User-Agent': ua!,
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- cookie: `_U=${parsedCookie}` || '',
- }
-}
-
-export class WatchDog {
- private tid = 0
- watch(fn: Function, timeout = 2000) {
- clearTimeout(this.tid)
- this.tid = setTimeout(fn, timeout + Math.random() * 1000)
- }
- reset() {
- clearTimeout(this.tid)
- }
-}
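
The `encodeHeadersToCookie` / `extraCurlFromCookie` pair above sidesteps per-cookie size limits by base64-encoding the whole curl command and spreading it over the `BING_HEADER`, `BING_HEADER1`, `BING_HEADER2` cookies in chunks of at most 4000 characters. A rough Python sketch of the same round trip (the file itself is TypeScript; this is an illustration of the scheme, not a port):

```py
import base64

CHUNK_KEYS = ["BING_HEADER", "BING_HEADER1", "BING_HEADER2"]

def encode_headers_to_cookie(content: str) -> list:
    b64 = base64.b64encode(content.encode()).decode()
    chunks = [b64[i:i + 4000] for i in range(0, len(b64), 4000)]
    return [f"{key}={chunks[i] if i < len(chunks) else ''}" for i, key in enumerate(CHUNK_KEYS)]

def extract_curl_from_cookie(cookies: dict) -> str:
    b64 = "".join(cookies.get(key, "") for key in CHUNK_KEYS)
    try:
        return base64.b64decode(b64).decode()
    except Exception:
        return ""

curl = "curl 'https://www.bing.com/turing/conversation/create' -H 'User-Agent: ...'"
cookie_pairs = dict(item.split("=", 1) for item in encode_headers_to_cookie(curl))
assert extract_curl_from_cookie(cookie_pairs) == curl
```
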
diff --git "a/spaces/Gmq-x/gpt-academic/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" "b/spaces/Gmq-x/gpt-academic/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py"
deleted file mode 100644
index ee6a1a44340ac2cf8fc3a4323c23218c69e0946f..0000000000000000000000000000000000000000
--- "a/spaces/Gmq-x/gpt-academic/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py"
+++ /dev/null
@@ -1,161 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
-fast_debug = False
-
-class PaperFileGroup():
- def __init__(self):
- self.file_paths = []
- self.file_contents = []
- self.sp_file_contents = []
- self.sp_file_index = []
- self.sp_file_tag = []
-
- # count_token
- from request_llm.bridge_all import model_info
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
- def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
- self.get_token_num = get_token_num
-
- def run_file_split(self, max_token_limit=1900):
-        """
-        Split long texts into smaller segments.
-        """
- for index, file_content in enumerate(self.file_contents):
- if self.get_token_num(file_content) < max_token_limit:
- self.sp_file_contents.append(file_content)
- self.sp_file_index.append(index)
- self.sp_file_tag.append(self.file_paths[index])
- else:
- from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
- segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
- for j, segment in enumerate(segments):
- self.sp_file_contents.append(segment)
- self.sp_file_index.append(index)
- self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md")
-
- print('Segmentation: done')
-
-def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
- import time, os, re
- from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-
-    # <-------- Read the Markdown files and strip all comments ---------->
- pfg = PaperFileGroup()
-
- for index, fp in enumerate(file_manifest):
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
- file_content = f.read()
-            # record the text with comments removed
- pfg.file_paths.append(fp)
- pfg.file_contents.append(file_content)
-
-    # <-------- Split Markdown files that are too long ---------->
- pfg.run_file_split(max_token_limit=1500)
- n_split = len(pfg.sp_file_contents)
-
-    # <-------- Start multi-threaded translation ---------->
- if language == 'en->zh':
- inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
- sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
- elif language == 'zh->en':
- inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
- sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
-
- gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
- inputs_array=inputs_array,
- inputs_show_user_array=inputs_show_user_array,
- llm_kwargs=llm_kwargs,
- chatbot=chatbot,
- history_array=[[""] for _ in range(n_split)],
- sys_prompt_array=sys_prompt_array,
-        # max_workers=5,  # the maximum parallel load allowed by OpenAI
- scroller_max_len = 80
- )
-
-    # <-------- Collect the results and finish ---------->
- create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
- res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
- history = gpt_response_collection
- chatbot.append((f"{fp}完成了吗?", res))
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-
-
-
-
-@CatchException
-def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    # Basic info: feature description and contributors
- chatbot.append([
- "函数插件功能?",
- "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-    # Try to import dependencies; if any are missing, suggest how to install them
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
-    history = []    # clear the history to avoid overflowing the input
- import glob, os
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
-
-
-
-
-
-@CatchException
-def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    # Basic info: feature description and contributors
- chatbot.append([
- "函数插件功能?",
- "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-    # Try to import dependencies; if any are missing, suggest how to install them
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
-    history = []    # clear the history to avoid overflowing the input
- import glob, os
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- if txt.endswith('.md'):
- file_manifest = [txt]
- else:
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
\ No newline at end of file
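
`run_file_split` above has one job: keep every fragment sent to the model under a token budget, carrying the originating file path along as a tag. A stripped-down sketch of that idea, using a whitespace word count in place of the real `tiktoken` tokenizer and a simple paragraph-packing heuristic instead of the project's `breakdown_txt_to_satisfy_token_limit_for_pdf`:

```py
def split_by_token_limit(text, max_tokens, count=lambda s: len(s.split())):
    """Greedily pack paragraphs until the next one would exceed the budget."""
    segments, current = [], ""
    for para in text.split("\n\n"):
        candidate = f"{current}\n\n{para}".strip() if current else para
        if not current or count(candidate) <= max_tokens:
            current = candidate
        else:
            segments.append(current)
            current = para
    if current:
        segments.append(current)
    return segments

doc = "\n\n".join(f"paragraph {i} " + "word " * 50 for i in range(6))
parts = split_by_token_limit(doc, max_tokens=120)
print([len(p.split()) for p in parts])   # each chunk stays within the 120-"token" budget
```
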
diff --git a/spaces/Gradio-Blocks/beat-interpolator/app.py b/spaces/Gradio-Blocks/beat-interpolator/app.py
deleted file mode 100644
index 93eb63d0b5eb5491659be26c8019a0e45f04a006..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/beat-interpolator/app.py
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import argparse
-import os
-import glob
-import pickle
-import sys
-import importlib
-from typing import List, Tuple
-
-import gradio as gr
-import numpy as np
-import torch
-import torch.nn as nn
-
-from beat_interpolator import beat_interpolator
-
-
-def build_models():
- modules = glob.glob('examples/models/*')
- modules = [
- getattr(
- importlib.import_module(
- module.replace('/', '.'),
- package=None
- ),
- 'create'
- )()
- for module in modules
- if '.py' not in module and '__' not in module
- ]
-
- attrs = [ (module['name'], module) for module in modules ]
- mnist_idx = -1
- for i in range(len(attrs)):
- name, _ = attrs[i]
- if name == 'MNIST':
- mnist_idx = i
- if mnist_idx > -1:
- mnist_attr = attrs.pop(mnist_idx)
- attrs.insert(0, mnist_attr)
-
- return attrs
-
-
-def parse_args() -> argparse.Namespace:
- parser = argparse.ArgumentParser()
- parser.add_argument('--device', type=str, default='cpu')
- parser.add_argument('--theme', type=str)
- parser.add_argument('--share', action='store_true')
- parser.add_argument('--port', type=int)
- parser.add_argument('--disable-queue',
- dest='enable_queue',
- action='store_false')
- return parser.parse_args()
-
-
-def main():
- args = parse_args()
- enable_queue = args.enable_queue
- model_attrs = build_models()
-
- with gr.Blocks(theme=args.theme) as demo:
-        gr.Markdown('''
-        ''')
-
- demo.launch(
- enable_queue=args.enable_queue,
- server_port=args.port,
- share=args.share,
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py
deleted file mode 100644
index 835375cb0447378fc76431158eb0b8fc011d36bc..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py
+++ /dev/null
@@ -1,6 +0,0 @@
-_base_ = [
- '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py',
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
-]
-model = dict(
- decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/data/sound_dataset.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/data/sound_dataset.py
deleted file mode 100644
index 8b88cbe8016b4bd28c2de749177c9af29f7755fc..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/data/sound_dataset.py
+++ /dev/null
@@ -1,330 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Dataset of audio with a simple description.
-"""
-
-from dataclasses import dataclass, fields, replace
-import json
-from pathlib import Path
-import random
-import typing as tp
-
-import numpy as np
-import torch
-
-from .info_audio_dataset import (
- InfoAudioDataset,
- get_keyword_or_keyword_list
-)
-from ..modules.conditioners import (
- ConditioningAttributes,
- SegmentWithAttributes,
- WavCondition,
-)
-
-
-EPS = torch.finfo(torch.float32).eps
-TARGET_LEVEL_LOWER = -35
-TARGET_LEVEL_UPPER = -15
-
-
-@dataclass
-class SoundInfo(SegmentWithAttributes):
- """Segment info augmented with Sound metadata.
- """
- description: tp.Optional[str] = None
- self_wav: tp.Optional[torch.Tensor] = None
-
- @property
- def has_sound_meta(self) -> bool:
- return self.description is not None
-
- def to_condition_attributes(self) -> ConditioningAttributes:
- out = ConditioningAttributes()
-
- for _field in fields(self):
- key, value = _field.name, getattr(self, _field.name)
- if key == 'self_wav':
- out.wav[key] = value
- else:
- out.text[key] = value
- return out
-
- @staticmethod
- def attribute_getter(attribute):
- if attribute == 'description':
- preprocess_func = get_keyword_or_keyword_list
- else:
- preprocess_func = None
- return preprocess_func
-
- @classmethod
- def from_dict(cls, dictionary: dict, fields_required: bool = False):
- _dictionary: tp.Dict[str, tp.Any] = {}
-
- # allow a subset of attributes to not be loaded from the dictionary
- # these attributes may be populated later
- post_init_attributes = ['self_wav']
-
- for _field in fields(cls):
- if _field.name in post_init_attributes:
- continue
- elif _field.name not in dictionary:
- if fields_required:
- raise KeyError(f"Unexpected missing key: {_field.name}")
- else:
- preprocess_func: tp.Optional[tp.Callable] = cls.attribute_getter(_field.name)
- value = dictionary[_field.name]
- if preprocess_func:
- value = preprocess_func(value)
- _dictionary[_field.name] = value
- return cls(**_dictionary)
-
-
-class SoundDataset(InfoAudioDataset):
- """Sound audio dataset: Audio dataset with environmental sound-specific metadata.
-
- Args:
- info_fields_required (bool): Whether all the mandatory metadata fields should be in the loaded metadata.
- external_metadata_source (tp.Optional[str]): Folder containing JSON metadata for the corresponding dataset.
- The metadata files contained in this folder are expected to match the stem of the audio file with
- a json extension.
- aug_p (float): Probability of performing audio mixing augmentation on the batch.
- mix_p (float): Proportion of batch items that are mixed together when applying audio mixing augmentation.
- mix_snr_low (int): Lowerbound for SNR value sampled for mixing augmentation.
- mix_snr_high (int): Upperbound for SNR value sampled for mixing augmentation.
- mix_min_overlap (float): Minimum overlap between audio files when performing mixing augmentation.
- kwargs: Additional arguments for AudioDataset.
-
- See `audiocraft.data.info_audio_dataset.InfoAudioDataset` for full initialization arguments.
- """
- def __init__(
- self,
- *args,
- info_fields_required: bool = True,
- external_metadata_source: tp.Optional[str] = None,
- aug_p: float = 0.,
- mix_p: float = 0.,
- mix_snr_low: int = -5,
- mix_snr_high: int = 5,
- mix_min_overlap: float = 0.5,
- **kwargs
- ):
- kwargs['return_info'] = True # We require the info for each song of the dataset.
- super().__init__(*args, **kwargs)
- self.info_fields_required = info_fields_required
- self.external_metadata_source = external_metadata_source
- self.aug_p = aug_p
- self.mix_p = mix_p
- if self.aug_p > 0:
- assert self.mix_p > 0, "Expecting some mixing proportion mix_p if aug_p > 0"
- assert self.channels == 1, "SoundDataset with audio mixing considers only monophonic audio"
- self.mix_snr_low = mix_snr_low
- self.mix_snr_high = mix_snr_high
- self.mix_min_overlap = mix_min_overlap
-
- def _get_info_path(self, path: tp.Union[str, Path]) -> Path:
- """Get path of JSON with metadata (description, etc.).
- If there exists a JSON with the same name as 'path.name', then it will be used.
- Else, such JSON will be searched for in an external json source folder if it exists.
- """
- info_path = Path(path).with_suffix('.json')
- if Path(info_path).exists():
- return info_path
- elif self.external_metadata_source and (Path(self.external_metadata_source) / info_path.name).exists():
- return Path(self.external_metadata_source) / info_path.name
- else:
- raise Exception(f"Unable to find a metadata JSON for path: {path}")
-
- def __getitem__(self, index):
- wav, info = super().__getitem__(index)
- info_data = info.to_dict()
- info_path = self._get_info_path(info.meta.path)
- if Path(info_path).exists():
- with open(info_path, 'r') as json_file:
- sound_data = json.load(json_file)
- sound_data.update(info_data)
- sound_info = SoundInfo.from_dict(sound_data, fields_required=self.info_fields_required)
- # if there are multiple descriptions, sample one randomly
- if isinstance(sound_info.description, list):
- sound_info.description = random.choice(sound_info.description)
- else:
- sound_info = SoundInfo.from_dict(info_data, fields_required=False)
-
- sound_info.self_wav = WavCondition(
- wav=wav[None], length=torch.tensor([info.n_frames]),
- sample_rate=[sound_info.sample_rate], path=[info.meta.path], seek_time=[info.seek_time])
-
- return wav, sound_info
-
- def collater(self, samples):
- # when training, audio mixing is performed in the collate function
- wav, sound_info = super().collater(samples) # SoundDataset always returns infos
- if self.aug_p > 0:
- wav, sound_info = mix_samples(wav, sound_info, self.aug_p, self.mix_p,
- snr_low=self.mix_snr_low, snr_high=self.mix_snr_high,
- min_overlap=self.mix_min_overlap)
- return wav, sound_info
-
-
-def rms_f(x: torch.Tensor) -> torch.Tensor:
- return (x ** 2).mean(1).pow(0.5)
-
-
-def normalize(audio: torch.Tensor, target_level: int = -25) -> torch.Tensor:
- """Normalize the signal to the target level."""
- rms = rms_f(audio)
- scalar = 10 ** (target_level / 20) / (rms + EPS)
- audio = audio * scalar.unsqueeze(1)
- return audio
-
-
-def is_clipped(audio: torch.Tensor, clipping_threshold: float = 0.99) -> torch.Tensor:
- return (abs(audio) > clipping_threshold).any(1)
-
-
-def mix_pair(src: torch.Tensor, dst: torch.Tensor, min_overlap: float) -> torch.Tensor:
- start = random.randint(0, int(src.shape[1] * (1 - min_overlap)))
- remainder = src.shape[1] - start
- if dst.shape[1] > remainder:
- src[:, start:] = src[:, start:] + dst[:, :remainder]
- else:
- src[:, start:start+dst.shape[1]] = src[:, start:start+dst.shape[1]] + dst
- return src
-
-
-def snr_mixer(clean: torch.Tensor, noise: torch.Tensor, snr: int, min_overlap: float,
- target_level: int = -25, clipping_threshold: float = 0.99) -> torch.Tensor:
- """Function to mix clean speech and noise at various SNR levels.
-
- Args:
- clean (torch.Tensor): Clean audio source to mix, of shape [B, T].
- noise (torch.Tensor): Noise audio source to mix, of shape [B, T].
- snr (int): SNR level when mixing.
- min_overlap (float): Minimum overlap between the two mixed sources.
- target_level (int): Gain level in dB.
- clipping_threshold (float): Threshold for clipping the audio.
- Returns:
- torch.Tensor: The mixed audio, of shape [B, T].
- """
- if clean.shape[1] > noise.shape[1]:
- noise = torch.nn.functional.pad(noise, (0, clean.shape[1] - noise.shape[1]))
- else:
- noise = noise[:, :clean.shape[1]]
-
- # normalizing to -25 dB FS
- clean = clean / (clean.max(1)[0].abs().unsqueeze(1) + EPS)
- clean = normalize(clean, target_level)
- rmsclean = rms_f(clean)
-
- noise = noise / (noise.max(1)[0].abs().unsqueeze(1) + EPS)
- noise = normalize(noise, target_level)
- rmsnoise = rms_f(noise)
-
- # set the noise level for a given SNR
- noisescalar = (rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS)).unsqueeze(1)
- noisenewlevel = noise * noisescalar
-
- # mix noise and clean speech
- noisyspeech = mix_pair(clean, noisenewlevel, min_overlap)
-
- # randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value
- # there is a chance of clipping that might happen with very less probability, which is not a major issue.
- noisy_rms_level = np.random.randint(TARGET_LEVEL_LOWER, TARGET_LEVEL_UPPER)
- rmsnoisy = rms_f(noisyspeech)
- scalarnoisy = (10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS)).unsqueeze(1)
- noisyspeech = noisyspeech * scalarnoisy
- clean = clean * scalarnoisy
- noisenewlevel = noisenewlevel * scalarnoisy
-
- # final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly
- clipped = is_clipped(noisyspeech)
- if clipped.any():
- noisyspeech_maxamplevel = noisyspeech[clipped].max(1)[0].abs().unsqueeze(1) / (clipping_threshold - EPS)
- noisyspeech[clipped] = noisyspeech[clipped] / noisyspeech_maxamplevel
-
- return noisyspeech
-
-
-def snr_mix(src: torch.Tensor, dst: torch.Tensor, snr_low: int, snr_high: int, min_overlap: float):
- if snr_low == snr_high:
- snr = snr_low
- else:
- snr = np.random.randint(snr_low, snr_high)
- mix = snr_mixer(src, dst, snr, min_overlap)
- return mix
-
-
-def mix_text(src_text: str, dst_text: str):
- """Mix text from different sources by concatenating them."""
- if src_text == dst_text:
- return src_text
- return src_text + " " + dst_text
-
-
-def mix_samples(wavs: torch.Tensor, infos: tp.List[SoundInfo], aug_p: float, mix_p: float,
- snr_low: int, snr_high: int, min_overlap: float):
- """Mix samples within a batch, summing the waveforms and concatenating the text infos.
-
- Args:
- wavs (torch.Tensor): Audio tensors of shape [B, C, T].
- infos (list[SoundInfo]): List of SoundInfo items corresponding to the audio.
- aug_p (float): Augmentation probability.
- mix_p (float): Proportion of items in the batch to mix (and merge) together.
- snr_low (int): Lowerbound for sampling SNR.
- snr_high (int): Upperbound for sampling SNR.
- min_overlap (float): Minimum overlap between mixed samples.
- Returns:
- tuple[torch.Tensor, list[SoundInfo]]: A tuple containing the mixed wavs
- and mixed SoundInfo for the given batch.
- """
- # no mixing to perform within the batch
- if mix_p == 0:
- return wavs, infos
-
- if random.uniform(0, 1) < aug_p:
- # perform all augmentations on waveforms as [B, T]
- # randomly picking pairs of audio to mix
- assert wavs.size(1) == 1, f"Mix samples requires monophonic audio but C={wavs.size(1)}"
- wavs = wavs.mean(dim=1, keepdim=False)
- B, T = wavs.shape
- k = int(mix_p * B)
- mixed_sources_idx = torch.randperm(B)[:k]
- mixed_targets_idx = torch.randperm(B)[:k]
- aug_wavs = snr_mix(
- wavs[mixed_sources_idx],
- wavs[mixed_targets_idx],
- snr_low,
- snr_high,
- min_overlap,
- )
- # mixing textual descriptions in metadata
- descriptions = [info.description for info in infos]
- aug_infos = []
- for i, j in zip(mixed_sources_idx, mixed_targets_idx):
- text = mix_text(descriptions[i], descriptions[j])
- m = replace(infos[i])
- m.description = text
- aug_infos.append(m)
-
- # back to [B, C, T]
- aug_wavs = aug_wavs.unsqueeze(1)
- assert aug_wavs.shape[0] > 0, "Samples mixing returned empty batch."
- assert aug_wavs.dim() == 3, f"Returned wav should be [B, C, T] but dim = {aug_wavs.dim()}"
- assert aug_wavs.shape[0] == len(aug_infos), "Mismatch between number of wavs and infos in the batch"
-
- return aug_wavs, aug_infos # [B, C, T]
- else:
- # randomly pick samples in the batch to match
- # the batch size when performing audio mixing
- B, C, T = wavs.shape
- k = int(mix_p * B)
- wav_idx = torch.randperm(B)[:k]
- wavs = wavs[wav_idx]
- infos = [infos[i] for i in wav_idx]
- assert wavs.shape[0] == len(infos), "Mismatch between number of wavs and infos in the batch"
-
- return wavs, infos # [B, C, T]
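
The heart of `snr_mixer` above is a single scaling step: after both signals are normalized, the noise is multiplied by `rms_clean / 10**(snr/20) / rms_noise`, which forces the clean-to-noise RMS ratio to the requested SNR before the two are summed. A small numeric sketch of just that step (batched `[B, T]` tensors, no overlap or clipping handling):

```py
import torch

def mix_at_snr(clean: torch.Tensor, noise: torch.Tensor, snr_db: float) -> torch.Tensor:
    eps = torch.finfo(torch.float32).eps
    rms = lambda x: (x ** 2).mean(dim=1, keepdim=True).sqrt()
    scale = rms(clean) / (10 ** (snr_db / 20)) / (rms(noise) + eps)   # sets the SNR
    return clean + noise * scale

clean, noise = torch.randn(1, 16000), torch.randn(1, 16000)
mixed = mix_at_snr(clean, noise, snr_db=5.0)
scaled_noise = mixed - clean
achieved = 20 * torch.log10((clean ** 2).mean().sqrt() / (scaled_noise ** 2).mean().sqrt())
print(round(achieved.item(), 2))   # ≈ 5.0 dB by construction
```
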
diff --git a/spaces/GuyYariv/AudioToken/modules/AudioToken/embedder.py b/spaces/GuyYariv/AudioToken/modules/AudioToken/embedder.py
deleted file mode 100644
index 39035969bf12ba730869e4c5fc5342998ea51d60..0000000000000000000000000000000000000000
--- a/spaces/GuyYariv/AudioToken/modules/AudioToken/embedder.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import torch
-import torch.nn as nn
-from modules.fga.atten import Atten
-
-
-class FGAEmbedder(nn.Module):
- def __init__(self, input_size=768*3, output_size=768):
- super(FGAEmbedder, self).__init__()
- self.fc1 = nn.Linear(input_size, input_size)
- self.fc2 = nn.Linear(input_size, output_size)
- self.gelu = nn.GELU()
- self.fga = Atten(util_e=[output_size], pairwise_flag=False)
-
- def forward(self, audio_embs):
- audio_embs = self.fc1(audio_embs)
- audio_embs = self.gelu(audio_embs)
- audio_embs = self.fc2(audio_embs)
- attend = self.fga([audio_embs])[0]
- return attend
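
`FGAEmbedder` above maps concatenated 768×3-dimensional audio features to 768-dimensional embeddings with two linear layers and a GELU, then passes them through the repo's factor-graph attention (`Atten`). The shape-check sketch below swaps `Atten` for a plain mean over the sequence so it runs standalone; treating the attention as a pooling step is an assumption, and the real module attends rather than averages:

```py
import torch
import torch.nn as nn

class EmbedderSketch(nn.Module):
    def __init__(self, input_size=768 * 3, output_size=768):
        super().__init__()
        self.fc1 = nn.Linear(input_size, input_size)
        self.fc2 = nn.Linear(input_size, output_size)
        self.gelu = nn.GELU()

    def forward(self, audio_embs: torch.Tensor) -> torch.Tensor:
        x = self.fc2(self.gelu(self.fc1(audio_embs)))   # [B, T, 768]
        return x.mean(dim=1)                            # stand-in for Atten: [B, 768]

embs = torch.randn(2, 10, 768 * 3)       # batch of 2, 10 frames of stacked audio features
print(EmbedderSketch()(embs).shape)      # torch.Size([2, 768])
```
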
diff --git a/spaces/HaHaBill/LandShapes-Antarctica/netdissect/upsegmodel/prroi_pool/src/prroi_pooling_gpu.c b/spaces/HaHaBill/LandShapes-Antarctica/netdissect/upsegmodel/prroi_pool/src/prroi_pooling_gpu.c
deleted file mode 100644
index 1e652963cdb76fe628d0a33bc270d2c25a0f3770..0000000000000000000000000000000000000000
--- a/spaces/HaHaBill/LandShapes-Antarctica/netdissect/upsegmodel/prroi_pool/src/prroi_pooling_gpu.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * File : prroi_pooling_gpu.c
- * Author : Jiayuan Mao, Tete Xiao
- * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com
- * Date : 07/13/2018
- *
- * Distributed under terms of the MIT license.
- * Copyright (c) 2017 Megvii Technology Limited.
- */
-
-#include <math.h>
-#include <torch/extension.h>
-
-#include <THC/THC.h>
-#include <ATen/cuda/CUDAContext.h>
-
-#include <pybind11/pybind11.h>
-
-#include "prroi_pooling_gpu_impl.cuh"
-
-
-at::Tensor prroi_pooling_forward_cuda(const at::Tensor &features, const at::Tensor &rois, int pooled_height, int pooled_width, float spatial_scale) {
- int nr_rois = rois.size(0);
- int nr_channels = features.size(1);
- int height = features.size(2);
- int width = features.size(3);
- int top_count = nr_rois * nr_channels * pooled_height * pooled_width;
- auto output = at::zeros({nr_rois, nr_channels, pooled_height, pooled_width}, features.options());
-
- if (output.numel() == 0) {
- THCudaCheck(cudaGetLastError());
- return output;
- }
-
- cudaStream_t stream = at::cuda::getCurrentCUDAStream();
- PrRoIPoolingForwardGpu(
-        stream, features.data<float>(), rois.data<float>(), output.data<float>(),
- nr_channels, height, width, pooled_height, pooled_width, spatial_scale,
- top_count
- );
-
- THCudaCheck(cudaGetLastError());
- return output;
-}
-
-at::Tensor prroi_pooling_backward_cuda(
- const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,
- int pooled_height, int pooled_width, float spatial_scale) {
-
- auto features_diff = at::zeros_like(features);
-
- int nr_rois = rois.size(0);
- int batch_size = features.size(0);
- int nr_channels = features.size(1);
- int height = features.size(2);
- int width = features.size(3);
- int top_count = nr_rois * nr_channels * pooled_height * pooled_width;
- int bottom_count = batch_size * nr_channels * height * width;
-
- if (output.numel() == 0) {
- THCudaCheck(cudaGetLastError());
- return features_diff;
- }
-
- cudaStream_t stream = at::cuda::getCurrentCUDAStream();
- PrRoIPoolingBackwardGpu(
- stream,
-        features.data<float>(), rois.data<float>(), output.data<float>(), output_diff.data<float>(),
-        features_diff.data<float>(),
- nr_channels, height, width, pooled_height, pooled_width, spatial_scale,
- top_count, bottom_count
- );
-
- THCudaCheck(cudaGetLastError());
- return features_diff;
-}
-
-at::Tensor prroi_pooling_coor_backward_cuda(
- const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,
- int pooled_height, int pooled_width, float spatial_scale) {
-
- auto coor_diff = at::zeros_like(rois);
-
- int nr_rois = rois.size(0);
- int nr_channels = features.size(1);
- int height = features.size(2);
- int width = features.size(3);
- int top_count = nr_rois * nr_channels * pooled_height * pooled_width;
- int bottom_count = nr_rois * 5;
-
- if (output.numel() == 0) {
- THCudaCheck(cudaGetLastError());
- return coor_diff;
- }
-
- cudaStream_t stream = at::cuda::getCurrentCUDAStream();
- PrRoIPoolingCoorBackwardGpu(
- stream,
-        features.data<float>(), rois.data<float>(), output.data<float>(), output_diff.data<float>(),
-        coor_diff.data<float>(),
- nr_channels, height, width, pooled_height, pooled_width, spatial_scale,
- top_count, bottom_count
- );
-
- THCudaCheck(cudaGetLastError());
- return coor_diff;
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("prroi_pooling_forward_cuda", &prroi_pooling_forward_cuda, "PRRoIPooling_forward");
- m.def("prroi_pooling_backward_cuda", &prroi_pooling_backward_cuda, "PRRoIPooling_backward");
- m.def("prroi_pooling_coor_backward_cuda", &prroi_pooling_coor_backward_cuda, "PRRoIPooling_backward_coor");
-}
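
The three bindings above are exported with pybind11, so on a CUDA machine they would typically be built and called as a PyTorch C++/CUDA extension. Below is a hedged sketch of that workflow with `torch.utils.cpp_extension.load`; the source paths and the companion `.cu` file are assumptions about the surrounding repo, and newer PyTorch releases have dropped the THC headers this file relies on, so treat it as an illustration rather than a tested build recipe:

```py
import torch
from torch.utils.cpp_extension import load

# Paths are hypothetical; they mirror the layout implied by the deleted file above.
prroi = load(
    name="prroi_pooling",
    sources=["src/prroi_pooling_gpu.c", "src/prroi_pooling_gpu_impl.cu"],
    verbose=True,
)

features = torch.randn(1, 16, 32, 32, device="cuda")
# One RoI per row, in the usual (batch_index, x1, y1, x2, y2) layout for this operator.
rois = torch.tensor([[0.0, 0.0, 0.0, 15.0, 15.0]], device="cuda")
out = prroi.prroi_pooling_forward_cuda(features, rois, 7, 7, 1.0)
print(out.shape)   # expected: torch.Size([1, 16, 7, 7])
```
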
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/transformer/transformer_encoder.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/transformer/transformer_encoder.py
deleted file mode 100644
index f007776a6f3b7e6731edc01d95aa24eed255d0e8..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/transformer/transformer_encoder.py
+++ /dev/null
@@ -1,341 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from typing import Dict, List, Optional
-
-import torch
-import torch.nn as nn
-from fairseq import utils
-from fairseq.distributed import fsdp_wrap
-from fairseq.models import FairseqEncoder
-from fairseq.modules import (
- FairseqDropout,
- LayerDropModuleList,
- LayerNorm,
- PositionalEmbedding,
- SinusoidalPositionalEmbedding,
-)
-from fairseq.modules import transformer_layer
-from fairseq.modules.checkpoint_activations import checkpoint_wrapper
-from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
-from torch import Tensor
-from fairseq.models.transformer import (
- TransformerConfig,
-)
-
-
-# rewrite name for backward compatibility in `make_generation_fast_`
-def module_name_fordropout(module_name: str) -> str:
- if module_name == 'TransformerEncoderBase':
- return 'TransformerEncoder'
- else:
- return module_name
-
-
-class TransformerEncoderBase(FairseqEncoder):
- """
- Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer
- is a :class:`TransformerEncoderLayer`.
-
- Args:
- args (argparse.Namespace): parsed command-line arguments
- dictionary (~fairseq.data.Dictionary): encoding dictionary
- embed_tokens (torch.nn.Embedding): input embedding
- """
-
- def __init__(self, cfg, dictionary, embed_tokens):
- self.cfg = cfg
- super().__init__(dictionary)
- self.register_buffer("version", torch.Tensor([3]))
-
- self.dropout_module = FairseqDropout(
- cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)
- )
- self.encoder_layerdrop = cfg.encoder.layerdrop
-
- embed_dim = embed_tokens.embedding_dim
- self.padding_idx = embed_tokens.padding_idx
- self.max_source_positions = cfg.max_source_positions
-
- self.embed_tokens = embed_tokens
-
- self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
-
- self.embed_positions = (
- PositionalEmbedding(
- cfg.max_source_positions,
- embed_dim,
- self.padding_idx,
- learned=cfg.encoder.learned_pos,
- )
- if not cfg.no_token_positional_embeddings
- else None
- )
- if cfg.layernorm_embedding:
- self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)
- else:
- self.layernorm_embedding = None
-
- if not cfg.adaptive_input and cfg.quant_noise.pq > 0:
- self.quant_noise = apply_quant_noise_(
- nn.Linear(embed_dim, embed_dim, bias=False),
- cfg.quant_noise.pq,
- cfg.quant_noise.pq_block_size,
- )
- else:
- self.quant_noise = None
-
- if self.encoder_layerdrop > 0.0:
- self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
- else:
- self.layers = nn.ModuleList([])
- self.layers.extend(
- [self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers)]
- )
- self.num_layers = len(self.layers)
-
- if cfg.encoder.normalize_before:
- self.layer_norm = LayerNorm(embed_dim, export=cfg.export)
- else:
- self.layer_norm = None
-
- def build_encoder_layer(self, cfg):
- layer = transformer_layer.TransformerEncoderLayerBase(cfg)
- checkpoint = cfg.checkpoint_activations
- if checkpoint:
- offload_to_cpu = cfg.offload_activations
- layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
- # if we are checkpointing, enforce that FSDP always wraps the
- # checkpointed layer, regardless of layer size
- min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
- layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
- return layer
-
- def forward_embedding(
- self, src_tokens, token_embedding: Optional[torch.Tensor] = None
- ):
- # embed tokens and positions
- if token_embedding is None:
- token_embedding = self.embed_tokens(src_tokens)
- x = embed = self.embed_scale * token_embedding
- if self.embed_positions is not None:
- x = embed + self.embed_positions(src_tokens)
- if self.layernorm_embedding is not None:
- x = self.layernorm_embedding(x)
- x = self.dropout_module(x)
- if self.quant_noise is not None:
- x = self.quant_noise(x)
- return x, embed
-
- def forward(
- self,
- src_tokens,
- src_lengths: Optional[torch.Tensor] = None,
- return_all_hiddens: bool = False,
- token_embeddings: Optional[torch.Tensor] = None,
- ):
- """
- Args:
- src_tokens (LongTensor): tokens in the source language of shape
- `(batch, src_len)`
- src_lengths (torch.LongTensor): lengths of each source sentence of
- shape `(batch)`
- return_all_hiddens (bool, optional): also return all of the
- intermediate hidden states (default: False).
- token_embeddings (torch.Tensor, optional): precomputed embeddings
- default `None` will recompute embeddings
-
- Returns:
- dict:
- - **encoder_out** (Tensor): the last encoder layer's output of
- shape `(src_len, batch, embed_dim)`
- - **encoder_padding_mask** (ByteTensor): the positions of
- padding elements of shape `(batch, src_len)`
- - **encoder_embedding** (Tensor): the (scaled) embedding lookup
- of shape `(batch, src_len, embed_dim)`
- - **encoder_states** (List[Tensor]): all intermediate
- hidden states of shape `(src_len, batch, embed_dim)`.
- Only populated if *return_all_hiddens* is True.
- """
- return self.forward_scriptable(
- src_tokens, src_lengths, return_all_hiddens, token_embeddings
- )
-
-    # TorchScript does not support super(), so a scriptable subclass cannot call
-    # the base class implementation directly. The current workaround is to add a
-    # helper method with a different name and call that helper from the subclass.
- def forward_scriptable(
- self,
- src_tokens,
- src_lengths: Optional[torch.Tensor] = None,
- return_all_hiddens: bool = False,
- token_embeddings: Optional[torch.Tensor] = None,
- ):
- """
- Args:
- src_tokens (LongTensor): tokens in the source language of shape
- `(batch, src_len)`
- src_lengths (torch.LongTensor): lengths of each source sentence of
- shape `(batch)`
- return_all_hiddens (bool, optional): also return all of the
- intermediate hidden states (default: False).
- token_embeddings (torch.Tensor, optional): precomputed embeddings
- default `None` will recompute embeddings
-
- Returns:
- dict:
- - **encoder_out** (Tensor): the last encoder layer's output of
- shape `(src_len, batch, embed_dim)`
- - **encoder_padding_mask** (ByteTensor): the positions of
- padding elements of shape `(batch, src_len)`
- - **encoder_embedding** (Tensor): the (scaled) embedding lookup
- of shape `(batch, src_len, embed_dim)`
- - **encoder_states** (List[Tensor]): all intermediate
- hidden states of shape `(src_len, batch, embed_dim)`.
- Only populated if *return_all_hiddens* is True.
- """
- # compute padding mask
- encoder_padding_mask = src_tokens.eq(self.padding_idx)
- has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any()
-
- x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
-
- # account for padding while computing the representation
- if has_pads:
- x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- encoder_states = []
-
- if return_all_hiddens:
- encoder_states.append(x)
-
- # encoder layers
- for layer in self.layers:
- x = layer(
- x, encoder_padding_mask=encoder_padding_mask if has_pads else None
- )
- if return_all_hiddens:
- assert encoder_states is not None
- encoder_states.append(x)
-
- if self.layer_norm is not None:
- x = self.layer_norm(x)
-
-        # The PyTorch Mobile lite interpreter does not support returning NamedTuple in
- # `forward` so we use a dictionary instead.
- # TorchScript does not support mixed values so the values are all lists.
- # The empty list is equivalent to None.
- src_lengths = src_tokens.ne(self.padding_idx).sum(dim=1, dtype=torch.int32).reshape(-1, 1).contiguous()
- return {
- "encoder_out": [x], # T x B x C
- "encoder_padding_mask": [encoder_padding_mask], # B x T
- "encoder_embedding": [encoder_embedding], # B x T x C
- "encoder_states": encoder_states, # List[T x B x C]
- "src_tokens": [],
- "src_lengths": [src_lengths],
- }
-
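The `forward_scriptable` docstring above fixes the shape conventions (`B x T` tokens in, `T x B x C` features in `encoder_out`) and how padded positions are zeroed out. The following is a minimal, self-contained sketch of those steps in plain PyTorch, with illustrative sizes and a dummy embedding table rather than fairseq's classes (positional embeddings are omitted):

```py
import math
import torch

# Illustrative shapes: B x T token ids in, T x B x C features out.
B, T, C, pad_idx = 2, 5, 8, 1
src_tokens = torch.tensor([[4, 6, 7, 1, 1],
                           [5, 9, 3, 2, 8]])          # B x T, 1 = <pad>
embed = torch.nn.Embedding(16, C, padding_idx=pad_idx)

x = math.sqrt(C) * embed(src_tokens)                  # scaled token embeddings, B x T x C
encoder_padding_mask = src_tokens.eq(pad_idx)         # B x T, True at padded positions
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))  # zero out padded positions
x = x.transpose(0, 1)                                 # T x B x C, as stored in encoder_out
print(x.shape, encoder_padding_mask.shape)
```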
- @torch.jit.export
- def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
- """
- Reorder encoder output according to *new_order*.
-
- Args:
- encoder_out: output from the ``forward()`` method
- new_order (LongTensor): desired order
-
- Returns:
- *encoder_out* rearranged according to *new_order*
- """
- if len(encoder_out["encoder_out"]) == 0:
- new_encoder_out = []
- else:
- new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
- if len(encoder_out["encoder_padding_mask"]) == 0:
- new_encoder_padding_mask = []
- else:
- new_encoder_padding_mask = [
- encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
- ]
- if len(encoder_out["encoder_embedding"]) == 0:
- new_encoder_embedding = []
- else:
- new_encoder_embedding = [
- encoder_out["encoder_embedding"][0].index_select(0, new_order)
- ]
-
- if len(encoder_out["src_tokens"]) == 0:
- src_tokens = []
- else:
- src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
-
- if len(encoder_out["src_lengths"]) == 0:
- src_lengths = []
- else:
- src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
-
- encoder_states = encoder_out["encoder_states"]
- if len(encoder_states) > 0:
- for idx, state in enumerate(encoder_states):
- encoder_states[idx] = state.index_select(1, new_order)
-
- return {
- "encoder_out": new_encoder_out, # T x B x C
- "encoder_padding_mask": new_encoder_padding_mask, # B x T
- "encoder_embedding": new_encoder_embedding, # B x T x C
- "encoder_states": encoder_states, # List[T x B x C]
- "src_tokens": src_tokens, # B x T
- "src_lengths": src_lengths, # B x 1
- }
-
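`reorder_encoder_out` boils down to an `index_select` over the batch dimension of each cached tensor (dimension 1 for the `T x B x C` outputs, dimension 0 for the `B x T` masks), e.g. when hypotheses are reordered during generation. A small sketch of that operation on a dummy tensor, with illustrative sizes:

```py
import torch

# Reorder the batch entries of a T x B x C tensor according to new_order.
T, B, C = 4, 3, 8
encoder_out = torch.randn(T, B, C)
new_order = torch.tensor([2, 0, 0])                 # permute / duplicate batch entries
reordered = encoder_out.index_select(1, new_order)  # still T x B x C, batch follows new_order
print(reordered.shape)
```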
- def max_positions(self):
- """Maximum input length supported by the encoder."""
- if self.embed_positions is None:
- return self.max_source_positions
- return min(self.max_source_positions, self.embed_positions.max_positions)
-
- def upgrade_state_dict_named(self, state_dict, name):
- """Upgrade a (possibly old) state dict for new versions of fairseq."""
- if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
- weights_key = "{}.embed_positions.weights".format(name)
- if weights_key in state_dict:
- print("deleting {0}".format(weights_key))
- del state_dict[weights_key]
- state_dict[
- "{}.embed_positions._float_tensor".format(name)
- ] = torch.FloatTensor(1)
- for i in range(self.num_layers):
- # update layer norms
- self.layers[i].upgrade_state_dict_named(
- state_dict, "{}.layers.{}".format(name, i)
- )
-
- version_key = "{}.version".format(name)
- if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
- # earlier checkpoints did not normalize after the stack of layers
- self.layer_norm = None
- self.normalize = False
- state_dict[version_key] = torch.Tensor([1])
- return state_dict
-
-
-class TransformerEncoder(TransformerEncoderBase):
- def __init__(self, args, dictionary, embed_tokens):
- self.args = args
- super().__init__(
- TransformerConfig.from_namespace(args),
- dictionary,
- embed_tokens,
- )
-
- def build_encoder_layer(self, args):
- return super().build_encoder_layer(
- TransformerConfig.from_namespace(args),
- )
diff --git a/spaces/HighCWu/GFPGAN-1.3/README.md b/spaces/HighCWu/GFPGAN-1.3/README.md
deleted file mode 100644
index eae07236d1eaf74a49b332de56c0a4a45b27da35..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/GFPGAN-1.3/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: GFPGAN 1.3
-emoji: 💻
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.0.22
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Hmjz100/MT3/README.md b/spaces/Hmjz100/MT3/README.md
deleted file mode 100644
index ba06e8d589e813eb5c0aaf4b6e3da68c292ab883..0000000000000000000000000000000000000000
--- a/spaces/Hmjz100/MT3/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: MT3
-emoji: 🎼
-colorFrom: purple
-colorTo: green
-sdk: gradio
-app_file: app.py
-duplicated_from: oniati/mrt
----
-# Configuration
-`title`: _string_
-Display title for the Space
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
\ No newline at end of file
diff --git a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_texts/text_duplicates/text_duplicates.html b/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_texts/text_duplicates/text_duplicates.html
deleted file mode 100644
index 0829d026a4b7c4ceb3e5382c5f3f1bd3b5d8c4f0..0000000000000000000000000000000000000000
--- a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_texts/text_duplicates/text_duplicates.html
+++ /dev/null
@@ -1 +0,0 @@
-duplicate_fraction 0.0 duplicates_dict
\ No newline at end of file
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/bucket_pad_length_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/bucket_pad_length_dataset.py
deleted file mode 100644
index 0f9410014845873bb0344fca6478c231c88e9dea..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/data/bucket_pad_length_dataset.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch.nn.functional as F
-from fairseq.data import BaseWrapperDataset
-from fairseq.data.data_utils import get_buckets, get_bucketed_sizes
-
-
-class BucketPadLengthDataset(BaseWrapperDataset):
- """
- Bucket and pad item lengths to the nearest bucket size. This can be used to
- reduce the number of unique batch shapes, which is important on TPUs since
- each new batch shape requires a recompilation.
-
- Args:
-        dataset (FairseqDataset): dataset to bucket
- sizes (List[int]): all item sizes
- num_buckets (int): number of buckets to create
- pad_idx (int): padding symbol
- left_pad (bool): if True, pad on the left; otherwise right pad
- """
-
- def __init__(
- self,
- dataset,
- sizes,
- num_buckets,
- pad_idx,
- left_pad,
- tensor_key=None,
- ):
- super().__init__(dataset)
- self.pad_idx = pad_idx
- self.left_pad = left_pad
-
- assert num_buckets > 0
- self.buckets = get_buckets(sizes, num_buckets)
- self._bucketed_sizes = get_bucketed_sizes(sizes, self.buckets)
- self._tensor_key = tensor_key
-
- def _set_tensor(self, item, val):
- if self._tensor_key is None:
- return val
- item[self._tensor_key] = val
- return item
-
- def _get_tensor(self, item):
- if self._tensor_key is None:
- return item
- return item[self._tensor_key]
-
- def _pad(self, tensor, bucket_size, dim=-1):
- num_pad = bucket_size - tensor.size(dim)
- return F.pad(
- tensor,
- (num_pad if self.left_pad else 0, 0 if self.left_pad else num_pad),
- value=self.pad_idx,
- )
-
- def __getitem__(self, index):
- item = self.dataset[index]
- bucket_size = self._bucketed_sizes[index]
- tensor = self._get_tensor(item)
- padded = self._pad(tensor, bucket_size)
- return self._set_tensor(item, padded)
-
- @property
- def sizes(self):
- return self._bucketed_sizes
-
- def num_tokens(self, index):
- return self._bucketed_sizes[index]
-
- def size(self, index):
- return self._bucketed_sizes[index]
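The class docstring above describes the idea: choose a small set of bucket sizes from the length distribution, then pad every item up to its bucket so batches reuse only a few shapes. A rough standalone sketch of that idea (not fairseq's exact `get_buckets`/`get_bucketed_sizes` implementation):

```py
import numpy as np
import torch
import torch.nn.functional as F

# Pick bucket sizes from length percentiles, then right-pad an item to its bucket.
sizes = np.array([3, 5, 7, 8, 12, 13, 15, 31])
num_buckets = 3
buckets = np.unique(
    np.percentile(sizes, np.linspace(0, 100, num_buckets + 1)[1:]).astype(int)
)
bucketed = np.array([buckets[np.searchsorted(buckets, s)] for s in sizes])

pad_idx = 1
item = torch.full((int(sizes[0]),), 7, dtype=torch.long)        # a length-3 item
bucket_size = int(bucketed[0])
padded = F.pad(item, (0, bucket_size - item.size(-1)), value=pad_idx)  # right-pad to bucket
print(buckets, bucketed, padded)
```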
diff --git a/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/openaimodel.py b/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/openaimodel.py
deleted file mode 100644
index 6aa3f5b26db1117564de1f41e16353b1858d732b..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/openaimodel.py
+++ /dev/null
@@ -1,1541 +0,0 @@
-from abc import abstractmethod
-from functools import partial
-import math
-from typing import Any, Iterable, Optional
-import torch
-
-import numpy as np
-import torch as th
-import torch.nn as nn
-import torch.nn.functional as F
-
-try:
- import xformers
- import xformers.ops
- XFORMERS_IS_AVAILBLE = True
-except Exception:
- XFORMERS_IS_AVAILBLE = False
-
-from ldm.modules.diffusionmodules.util import (
- checkpoint,
- conv_nd,
- linear,
- avg_pool_nd,
- zero_module,
- normalization,
- timestep_embedding,
-)
-from ldm.modules.attention import SpatialTransformer, SpatialTransformerV2
-from ldm.modules.spade import SPADE
-
-from basicsr.archs.stylegan2_arch import ConvLayer, EqualConv2d
-# dummy replace
-def convert_module_to_f16(x):
- pass
-
-def convert_module_to_f32(x):
- pass
-
-def exists(val):
- return val is not None
-
-def cal_fea_cossim(fea_1, fea_2, save_dir=None):
-    cossim_fuc = nn.CosineSimilarity(dim=-1, eps=1e-6)
-    # fall back to the default log paths when no save_dir is given
-    save_dir_1 = save_dir if save_dir is not None else './cos_sim64_1_not.txt'
-    save_dir_2 = './cos_sim64_2_not.txt'
-    b, c, h, w = fea_1.size()
-    fea_1 = fea_1.reshape(b, c, h*w)
-    fea_2 = fea_2.reshape(b, c, h*w)
-    cos_sim = cossim_fuc(fea_1, fea_2)
-    cos_sim = cos_sim.data.cpu().numpy()
-    with open(save_dir_1, "a") as my_file:
-        my_file.write(str(np.mean(cos_sim[0])) + "\n")
-    # with open(save_dir_2, "a") as my_file:
-    #     my_file.write(str(np.mean(cos_sim[1])) + "\n")
-
-## go
-class AttentionPool2d(nn.Module):
- """
- Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
- """
-
- def __init__(
- self,
- spacial_dim: int,
- embed_dim: int,
- num_heads_channels: int,
- output_dim: int = None,
- ):
- super().__init__()
- self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
- self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
- self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
- self.num_heads = embed_dim // num_heads_channels
- self.attention = QKVAttention(self.num_heads)
-
- def forward(self, x):
- b, c, *_spatial = x.shape
- x = x.reshape(b, c, -1) # NC(HW)
- x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
- x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
- x = self.qkv_proj(x)
- x = self.attention(x)
- x = self.c_proj(x)
- return x[:, :, 0]
-
-
-class TimestepBlock(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
-
- @abstractmethod
- def forward(self, x, emb):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-class TimestepBlockDual(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
-
- @abstractmethod
- def forward(self, x, emb, cond):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-class TimestepBlock3cond(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
-
- @abstractmethod
- def forward(self, x, emb, s_cond, seg_cond):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb, context=None, struct_cond=None, seg_cond=None):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb)
- elif isinstance(layer, SpatialTransformer) or isinstance(layer, SpatialTransformerV2):
- assert context is not None
- x = layer(x, context)
- elif isinstance(layer, TimestepBlockDual):
- assert struct_cond is not None
- x = layer(x, emb, struct_cond)
- elif isinstance(layer, TimestepBlock3cond):
- assert seg_cond is not None
- x = layer(x, emb, struct_cond, seg_cond)
- else:
- x = layer(x)
- return x
-
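`TimestepEmbedSequential` is essentially an `nn.Sequential` whose `forward` inspects each child's type and forwards the extra conditioning arguments only to the layers that accept them. A toy sketch of the same dispatch pattern, using made-up layer classes rather than the real diffusion modules:

```py
import torch
import torch.nn as nn

class WantsEmb(nn.Module):
    """A layer that consumes the timestep embedding in addition to x."""
    def __init__(self, dim):
        super().__init__()
        self.proj = nn.Linear(dim, dim)
    def forward(self, x, emb):
        return x + self.proj(emb)

class PlainLayer(nn.Module):
    """A layer that only sees x."""
    def forward(self, x):
        return torch.relu(x)

class EmbSequential(nn.Sequential):
    def forward(self, x, emb):
        for layer in self:
            # pass the embedding only to layers that declare they want it
            x = layer(x, emb) if isinstance(layer, WantsEmb) else layer(x)
        return x

block = EmbSequential(PlainLayer(), WantsEmb(8))
print(block(torch.randn(2, 8), torch.randn(2, 8)).shape)
```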
-
-class Upsample(nn.Module):
- """
- An upsampling layer with an optional convolution.
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- if use_conv:
- self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.dims == 3:
- x = F.interpolate(
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
- )
- else:
- x = F.interpolate(x, scale_factor=2, mode="nearest")
- if self.use_conv:
- x = self.conv(x)
- return x
-
-class TransposedUpsample(nn.Module):
- 'Learned 2x upsampling without padding'
- def __init__(self, channels, out_channels=None, ks=5):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
-
- self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
-
- def forward(self,x):
- return self.up(x)
-
-
-class Downsample(nn.Module):
- """
- A downsampling layer with an optional convolution.
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- stride = 2 if dims != 3 else (1, 2, 2)
- if use_conv:
- self.op = conv_nd(
- dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
- )
- else:
- assert self.channels == self.out_channels
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- return self.op(x)
-
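When `use_conv=False`, the up/down blocks above reduce to nearest-neighbour interpolation and strided average pooling. A quick sketch of those two conv-free paths for the 2-D case, on random data:

```py
import torch
import torch.nn.functional as F

# Conv-free 2x upsampling and 2x downsampling for a 2-D feature map.
x = torch.randn(1, 3, 8, 8)
up = F.interpolate(x, scale_factor=2, mode="nearest")   # 1 x 3 x 16 x 16
down = F.avg_pool2d(x, kernel_size=2, stride=2)         # 1 x 3 x 4 x 4
print(up.shape, down.shape)
```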
-
-class ResBlock(TimestepBlock):
- """
- A residual block that can optionally change the number of channels.
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels.
- :param use_conv: if True and out_channels is specified, use a spatial
- convolution instead of a smaller 1x1 convolution to change the
- channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param use_checkpoint: if True, use gradient checkpointing on this module.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- """
-
- def __init__(
- self,
- channels,
- emb_channels,
- dropout,
- out_channels=None,
- use_conv=False,
- use_scale_shift_norm=False,
- dims=2,
- use_checkpoint=False,
- up=False,
- down=False,
- ):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_checkpoint = use_checkpoint
- self.use_scale_shift_norm = use_scale_shift_norm
-
- self.in_layers = nn.Sequential(
- normalization(channels),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- self.updown = up or down
-
- if up:
- self.h_upd = Upsample(channels, False, dims)
- self.x_upd = Upsample(channels, False, dims)
- elif down:
- self.h_upd = Downsample(channels, False, dims)
- self.x_upd = Downsample(channels, False, dims)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- linear(
- emb_channels,
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
- ),
- )
-
- if self.out_channels % 32 == 0:
- self.out_layers = nn.Sequential(
- normalization(self.out_channels),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
- ),
- )
- else:
- self.out_layers = nn.Sequential(
- normalization(self.out_channels, self.out_channels),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
- ),
- )
-
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- elif use_conv:
- self.skip_connection = conv_nd(
- dims, channels, self.out_channels, 3, padding=1
- )
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
- def forward(self, x, emb):
- """
- Apply the block to a Tensor, conditioned on a timestep embedding.
- :param x: an [N x C x ...] Tensor of features.
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
- :return: an [N x C x ...] Tensor of outputs.
- """
- return checkpoint(
- self._forward, (x, emb), self.parameters(), self.use_checkpoint
- )
-
-
- def _forward(self, x, emb):
- if self.updown:
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
- h = in_rest(x)
- h = self.h_upd(h)
- x = self.x_upd(x)
- h = in_conv(h)
- else:
- h = self.in_layers(x)
- emb_out = self.emb_layers(emb).type(h.dtype)
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
- if self.use_scale_shift_norm:
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = th.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
- else:
- h = h + emb_out
- h = self.out_layers(h)
- return self.skip_connection(x) + h
-
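With `use_scale_shift_norm=True`, `_forward` above projects the timestep embedding to `2 * out_channels`, splits it into a scale and a shift, and applies `norm(h) * (1 + scale) + shift`. A small self-contained sketch of that FiLM-style conditioning step, with illustrative channel counts and a plain `GroupNorm` standing in for `normalization`:

```py
import torch
import torch.nn as nn

# FiLM-style scale/shift conditioning of a feature map by an embedding vector.
N, C, H, W, emb_dim = 2, 8, 16, 16, 32
h = torch.randn(N, C, H, W)
emb = torch.randn(N, emb_dim)

emb_proj = nn.Linear(emb_dim, 2 * C)
norm = nn.GroupNorm(4, C)

emb_out = emb_proj(emb)[..., None, None]          # N x 2C x 1 x 1
scale, shift = torch.chunk(emb_out, 2, dim=1)     # each N x C x 1 x 1
h = norm(h) * (1 + scale) + shift                 # conditioned activations
print(h.shape)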
-class ResBlockDual(TimestepBlockDual):
- """
- A residual block that can optionally change the number of channels.
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels.
- :param use_conv: if True and out_channels is specified, use a spatial
- convolution instead of a smaller 1x1 convolution to change the
- channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param use_checkpoint: if True, use gradient checkpointing on this module.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- """
-
- def __init__(
- self,
- channels,
- emb_channels,
- dropout,
- semb_channels,
- out_channels=None,
- use_conv=False,
- use_scale_shift_norm=False,
- dims=2,
- use_checkpoint=False,
- up=False,
- down=False,
- ):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_checkpoint = use_checkpoint
- self.use_scale_shift_norm = use_scale_shift_norm
-
- self.in_layers = nn.Sequential(
- normalization(channels),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- self.updown = up or down
-
- if up:
- self.h_upd = Upsample(channels, False, dims)
- self.x_upd = Upsample(channels, False, dims)
- elif down:
- self.h_upd = Downsample(channels, False, dims)
- self.x_upd = Downsample(channels, False, dims)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- linear(
- emb_channels,
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
- ),
- )
-
-        # Here we use the prebuilt SPADE module rather than SFT; this should have no significant effect on performance.
- self.spade = SPADE(self.out_channels, semb_channels)
-
- self.out_layers = nn.Sequential(
- normalization(self.out_channels),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
- ),
- )
-
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- elif use_conv:
- self.skip_connection = conv_nd(
- dims, channels, self.out_channels, 3, padding=1
- )
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
- def forward(self, x, emb, s_cond):
-        """
-        Apply the block to a Tensor, conditioned on a timestep embedding.
-        :param x: an [N x C x ...] Tensor of features.
-        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
-        :param s_cond: a spatial conditioning input passed to the SPADE layer.
-        :return: an [N x C x ...] Tensor of outputs.
-        """
- return checkpoint(
- self._forward, (x, emb, s_cond), self.parameters(), self.use_checkpoint
- )
-
-
- def _forward(self, x, emb, s_cond):
- if self.updown:
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
- h = in_rest(x)
- h = self.h_upd(h)
- x = self.x_upd(x)
- h = in_conv(h)
- else:
- h = self.in_layers(x)
- emb_out = self.emb_layers(emb).type(h.dtype)
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
- if self.use_scale_shift_norm:
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = th.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
- else:
- h = h + emb_out
- h = self.out_layers(h)
- h = self.spade(h, s_cond)
- return self.skip_connection(x) + h
-
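`ResBlockDual` ends by passing the activations through a SPADE layer driven by `s_cond`. The following is a rough sketch of SPADE-style conditioning in general terms, assuming a parameter-free `GroupNorm` plus two conv heads that predict per-pixel scale and shift maps from the spatial condition; it is not StableSR's actual `SPADE` module:

```py
import torch
import torch.nn as nn
import torch.nn.functional as F

# Spatially-adaptive (de)normalization sketch: the condition is resized to the
# feature map and turned into per-pixel scale/shift maps.
N, C, H, W, cond_C = 2, 8, 16, 16, 4
h = torch.randn(N, C, H, W)
s_cond = torch.randn(N, cond_C, H // 2, W // 2)

norm = nn.GroupNorm(4, C, affine=False)
to_gamma = nn.Conv2d(cond_C, C, 3, padding=1)
to_beta = nn.Conv2d(cond_C, C, 3, padding=1)

cond = F.interpolate(s_cond, size=h.shape[-2:], mode="nearest")
out = norm(h) * (1 + to_gamma(cond)) + to_beta(cond)
print(out.shape)
```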
-class AttentionBlock(nn.Module):
- """
- An attention block that allows spatial positions to attend to each other.
- Originally ported from here, but adapted to the N-d case.
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
- """
-
- def __init__(
- self,
- channels,
- num_heads=1,
- num_head_channels=-1,
- use_checkpoint=False,
- use_new_attention_order=False,
- ):
- super().__init__()
- self.channels = channels
- if num_head_channels == -1:
- self.num_heads = num_heads
- else:
- assert (
- channels % num_head_channels == 0
- ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
- self.num_heads = channels // num_head_channels
- self.use_checkpoint = use_checkpoint
- self.norm = normalization(channels)
- self.qkv = conv_nd(1, channels, channels * 3, 1)
- if use_new_attention_order:
- # split qkv before split heads
- self.attention = QKVAttention(self.num_heads)
- else:
- # split heads before split qkv
- self.attention = QKVAttentionLegacy(self.num_heads)
-
- self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
-
- def forward(self, x):
-        return checkpoint(self._forward, (x,), self.parameters(), True)   # TODO: review checkpoint usage (flag is hard-coded to True) and fix the .half call
- #return pt_checkpoint(self._forward, x) # pytorch
-
- def _forward(self, x):
- b, c, *spatial = x.shape
- x = x.reshape(b, c, -1)
- qkv = self.qkv(self.norm(x))
- h = self.attention(qkv)
- h = self.proj_out(h)
- return (x + h).reshape(b, c, *spatial)
-
-
-def count_flops_attn(model, _x, y):
- """
- A counter for the `thop` package to count the operations in an
- attention operation.
- Meant to be used like:
- macs, params = thop.profile(
- model,
- inputs=(inputs, timestamps),
- custom_ops={QKVAttention: QKVAttention.count_flops},
- )
- """
- b, c, *spatial = y[0].shape
- num_spatial = int(np.prod(spatial))
- # We perform two matmuls with the same number of ops.
- # The first computes the weight matrix, the second computes
- # the combination of the value vectors.
- matmul_ops = 2 * b * (num_spatial ** 2) * c
- model.total_ops += th.DoubleTensor([matmul_ops])
-
-class QKVAttentionLegacy(nn.Module):
- """
-    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
- self.attention_op: Optional[Any] = None
-
- def forward(self, qkv):
- """
- Apply QKV attention.
- :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- if XFORMERS_IS_AVAILBLE:
- q, k, v = map(
- lambda t:t.permute(0,2,1)
- .contiguous(),
- (q, k, v),
- )
- # actually compute the attention, what we cannot get enough of
- a = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
- a = (
- a.permute(0,2,1)
- .reshape(bs, -1, length)
- )
- else:
- weight = th.einsum(
- "bct,bcs->bts", q * scale, k * scale
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v)
- a = a.reshape(bs, -1, length)
- return a
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
-
-class QKVAttention(nn.Module):
- """
- A module which performs QKV attention and splits in a different order.
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
- self.attention_op: Optional[Any] = None
-
- def forward(self, qkv):
- """
- Apply QKV attention.
- :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.chunk(3, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- if XFORMERS_IS_AVAILBLE:
- q, k, v = map(
- lambda t:t.permute(0,2,1)
- .contiguous(),
- (q, k, v),
- )
- # actually compute the attention, what we cannot get enough of
- a = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
- a = (
- a.permute(0,2,1)
- .reshape(bs, -1, length)
- )
- else:
- weight = th.einsum(
- "bct,bcs->bts",
- (q * scale).view(bs * self.n_heads, ch, length),
- (k * scale).view(bs * self.n_heads, ch, length),
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
- a = a.reshape(bs, -1, length)
- return a
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
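When xformers is unavailable, both attention classes fall back to plain scaled dot-product attention written with `einsum` over head-folded tensors. A standalone sketch that mirrors `QKVAttentionLegacy`'s non-xformers path on random data, with illustrative sizes:

```py
import math
import torch as th

# qkv packed as [N x (H*3*C) x T]; heads are folded into the batch dimension.
n_heads, bs, ch, length = 2, 1, 4, 6
qkv = th.randn(bs, n_heads * 3 * ch, length)

q, k, v = qkv.reshape(bs * n_heads, ch * 3, length).split(ch, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum("bct,bcs->bts", q * scale, k * scale)       # attention logits
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)  # softmax in fp32 for stability
a = th.einsum("bts,bcs->bct", weight, v).reshape(bs, -1, length)
print(a.shape)   # [N x (H*C) x T]
```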
-
-class UNetModel(nn.Module):
- """
- The full UNet model with attention and timestep embedding.
- :param in_channels: channels in the input Tensor.
- :param model_channels: base channel count for the model.
- :param out_channels: channels in the output Tensor.
- :param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which
- attention will take place. May be a set, list, or tuple.
- For example, if this contains 4, then at 4x downsampling, attention
- will be used.
- :param dropout: the dropout probability.
- :param channel_mult: channel multiplier for each level of the UNet.
- :param conv_resample: if True, use learned convolutions for upsampling and
- downsampling.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param num_classes: if specified (as an int), then this model will be
- class-conditional with `num_classes` classes.
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
- :param num_heads: the number of attention heads in each attention layer.
-    :param num_head_channels: if specified, ignore num_heads and instead use
- a fixed channel width per attention head.
- :param num_heads_upsample: works with num_heads to set a different number
- of heads for upsampling. Deprecated.
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
- :param resblock_updown: use residual blocks for up/downsampling.
- :param use_new_attention_order: use a different attention pattern for potentially
- increased efficiency.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- num_classes=None,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=-1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- use_spatial_transformer=False, # custom transformer support
- transformer_depth=1, # custom transformer support
- context_dim=None, # custom transformer support
- n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
- legacy=True,
- ):
- super().__init__()
- if use_spatial_transformer:
- assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
-
- if context_dim is not None:
- assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
- from omegaconf.listconfig import ListConfig
- if type(context_dim) == ListConfig:
- context_dim = list(context_dim)
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- if num_heads == -1:
- assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
-
- if num_head_channels == -1:
- assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
-
- self.image_size = image_size
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.num_classes = num_classes
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
- self.predict_codebook_ids = n_embed is not None
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- if self.num_classes is not None:
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
- )
- ]
- )
- self._feature_size = model_channels
- input_block_chans = [model_channels]
- ch = model_channels
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=mult * model_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- #num_heads = 1
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- ) if not use_spatial_transformer else SpatialTransformer(
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- #num_heads = 1
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- ) if not use_spatial_transformer else SpatialTransformer(
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
- ),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- self._feature_size += ch
-
- self.output_blocks = nn.ModuleList([])
- for level, mult in list(enumerate(channel_mult))[::-1]:
- for i in range(num_res_blocks + 1):
- ich = input_block_chans.pop()
- layers = [
- ResBlock(
- ch + ich,
- time_embed_dim,
- dropout,
- out_channels=model_channels * mult,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = model_channels * mult
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- #num_heads = 1
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads_upsample,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- ) if not use_spatial_transformer else SpatialTransformer(
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
- )
- )
- if level and i == num_res_blocks:
- out_ch = ch
- layers.append(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- up=True,
- )
- if resblock_updown
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
- )
- ds //= 2
- self.output_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
-
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
- )
- if self.predict_codebook_ids:
- self.id_predictor = nn.Sequential(
- normalization(ch),
- conv_nd(dims, model_channels, n_embed, 1),
- #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
- )
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
- self.output_blocks.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
- self.output_blocks.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
- """
- Apply the model to an input batch.
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :param context: conditioning plugged in via crossattn
- :param y: an [N] Tensor of labels, if class-conditional.
- :return: an [N x C x ...] Tensor of outputs.
- """
- assert (y is not None) == (
- self.num_classes is not None
- ), "must specify y if and only if the model is class-conditional"
- hs = []
- t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
- emb = self.time_embed(t_emb)
-
- if self.num_classes is not None:
- assert y.shape == (x.shape[0],)
- emb = emb + self.label_emb(y)
-
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb, context)
- hs.append(h)
- h = self.middle_block(h, emb, context)
- for module in self.output_blocks:
- h = th.cat([h, hs.pop()], dim=1)
- h = module(h, emb, context)
- h = h.type(x.dtype)
- if self.predict_codebook_ids:
- return self.id_predictor(h)
- else:
- return self.out(h)
-
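`UNetModel.forward` first maps the integer timesteps to sinusoidal features via `timestep_embedding` and then through the `time_embed` MLP. A compact sketch of a sinusoidal timestep embedding of that kind (an even `dim` is assumed here; the real helper also handles odd dims and a `repeat_only` flag):

```py
import math
import torch as th

def sinusoidal_embedding(timesteps, dim, max_period=10000):
    # geometric frequency ladder, then cos/sin features per timestep
    half = dim // 2
    freqs = th.exp(-math.log(max_period) * th.arange(half, dtype=th.float32) / half)
    args = timesteps[:, None].float() * freqs[None]
    return th.cat([th.cos(args), th.sin(args)], dim=-1)

t_emb = sinusoidal_embedding(th.tensor([0, 10, 500]), dim=8)
print(t_emb.shape)   # 3 x 8
```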
-class UNetModelDualcondV2(nn.Module):
- """
- The full UNet model with attention and timestep embedding.
- :param in_channels: channels in the input Tensor.
- :param model_channels: base channel count for the model.
- :param out_channels: channels in the output Tensor.
- :param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which
- attention will take place. May be a set, list, or tuple.
- For example, if this contains 4, then at 4x downsampling, attention
- will be used.
- :param dropout: the dropout probability.
- :param channel_mult: channel multiplier for each level of the UNet.
- :param conv_resample: if True, use learned convolutions for upsampling and
- downsampling.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param num_classes: if specified (as an int), then this model will be
- class-conditional with `num_classes` classes.
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
- :param num_heads: the number of attention heads in each attention layer.
-    :param num_head_channels: if specified, ignore num_heads and instead use
- a fixed channel width per attention head.
- :param num_heads_upsample: works with num_heads to set a different number
- of heads for upsampling. Deprecated.
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
- :param resblock_updown: use residual blocks for up/downsampling.
- :param use_new_attention_order: use a different attention pattern for potentially
- increased efficiency.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- num_classes=None,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=-1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- use_spatial_transformer=False, # custom transformer support
- transformer_depth=1, # custom transformer support
- context_dim=None, # custom transformer support
- n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
- legacy=True,
- disable_self_attentions=None,
- num_attention_blocks=None,
- disable_middle_self_attn=False,
- use_linear_in_transformer=False,
- semb_channels=None
- ):
- super().__init__()
- if use_spatial_transformer:
- assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
-
- if context_dim is not None:
- assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
- from omegaconf.listconfig import ListConfig
- if type(context_dim) == ListConfig:
- context_dim = list(context_dim)
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- if num_heads == -1:
- assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
-
- if num_head_channels == -1:
- assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
-
- self.image_size = image_size
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- if isinstance(num_res_blocks, int):
- self.num_res_blocks = len(channel_mult) * [num_res_blocks]
- else:
- if len(num_res_blocks) != len(channel_mult):
- raise ValueError("provide num_res_blocks either as an int (globally constant) or "
- "as a list/tuple (per-level) with the same length as channel_mult")
- self.num_res_blocks = num_res_blocks
- if disable_self_attentions is not None:
- # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
- assert len(disable_self_attentions) == len(channel_mult)
- if num_attention_blocks is not None:
- assert len(num_attention_blocks) == len(self.num_res_blocks)
- assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
- print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
- f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
- f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
- f"attention will still not be set.")
-
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.num_classes = num_classes
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
- self.predict_codebook_ids = n_embed is not None
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- if self.num_classes is not None:
- if isinstance(self.num_classes, int):
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
- elif self.num_classes == "continuous":
- print("setting up linear c_adm embedding layer")
- self.label_emb = nn.Linear(1, time_embed_dim)
- else:
- raise ValueError()
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
- )
- ]
- )
- self._feature_size = model_channels
- input_block_chans = [model_channels]
- ch = model_channels
- ds = 1
- for level, mult in enumerate(channel_mult):
- for nr in range(self.num_res_blocks[level]):
- layers = [
- ResBlockDual(
- ch,
- time_embed_dim,
- dropout,
- semb_channels=semb_channels,
- out_channels=mult * model_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- #num_heads = 1
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- if exists(disable_self_attentions):
- disabled_sa = disable_self_attentions[level]
- else:
- disabled_sa = False
-
- if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- ) if not use_spatial_transformer else SpatialTransformerV2(
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
- disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
- use_checkpoint=use_checkpoint
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlockDual(
- ch,
- time_embed_dim,
- dropout,
- semb_channels=semb_channels,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- #num_heads = 1
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- self.middle_block = TimestepEmbedSequential(
- ResBlockDual(
- ch,
- time_embed_dim,
- dropout,
- semb_channels=semb_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- ) if not use_spatial_transformer else SpatialTransformerV2( # always uses a self-attn
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
- disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
- use_checkpoint=use_checkpoint
- ),
- ResBlockDual(
- ch,
- time_embed_dim,
- dropout,
- semb_channels=semb_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- self._feature_size += ch
-
- self.output_blocks = nn.ModuleList([])
- for level, mult in list(enumerate(channel_mult))[::-1]:
- for i in range(self.num_res_blocks[level] + 1):
- ich = input_block_chans.pop()
- layers = [
- ResBlockDual(
- ch + ich,
- time_embed_dim,
- dropout,
- semb_channels=semb_channels,
- out_channels=model_channels * mult,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = model_channels * mult
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- #num_heads = 1
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- if exists(disable_self_attentions):
- disabled_sa = disable_self_attentions[level]
- else:
- disabled_sa = False
-
- if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads_upsample,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- ) if not use_spatial_transformer else SpatialTransformerV2(
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
- disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
- use_checkpoint=use_checkpoint
- )
- )
- if level and i == self.num_res_blocks[level]:
- out_ch = ch
- layers.append(
- ResBlockDual(
- ch,
- time_embed_dim,
- dropout,
- semb_channels=semb_channels,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- up=True,
- )
- if resblock_updown
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
- )
- ds //= 2
- self.output_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
-
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
- )
- if self.predict_codebook_ids:
- self.id_predictor = nn.Sequential(
- normalization(ch),
- conv_nd(dims, model_channels, n_embed, 1),
- #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
- )
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
- self.output_blocks.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
- self.output_blocks.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps=None, context=None, struct_cond=None, y=None,**kwargs):
- """
- Apply the model to an input batch.
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
-        :param context: conditioning plugged in via crossattn
-        :param struct_cond: structural conditioning features consumed by the SPADE-based residual blocks
-        :param y: an [N] Tensor of labels, if class-conditional.
- :return: an [N x C x ...] Tensor of outputs.
- """
- assert (y is not None) == (
- self.num_classes is not None
- ), "must specify y if and only if the model is class-conditional"
- hs = []
- t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
- emb = self.time_embed(t_emb)
-
- if self.num_classes is not None:
- assert y.shape == (x.shape[0],)
- emb = emb + self.label_emb(y)
-
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb, context, struct_cond)
- hs.append(h)
- h = self.middle_block(h, emb, context, struct_cond)
- for module in self.output_blocks:
- h = th.cat([h, hs.pop()], dim=1)
- h = module(h, emb, context, struct_cond)
- h = h.type(x.dtype)
- if self.predict_codebook_ids:
- return self.id_predictor(h)
- else:
- return self.out(h)
-
-class EncoderUNetModelWT(nn.Module):
- """
- The half UNet model with attention and timestep embedding.
- For usage, see UNet.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- *args,
- **kwargs
- ):
- super().__init__()
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
- )
- ]
- )
- self._feature_size = model_channels
- input_block_chans = []
- ch = model_channels
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=mult * model_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- ),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- input_block_chans.append(ch)
- self._feature_size += ch
- self.input_block_chans = input_block_chans
-
- self.fea_tran = nn.ModuleList([])
-
- for i in range(len(input_block_chans)):
- self.fea_tran.append(
- ResBlock(
- input_block_chans[i],
- time_embed_dim,
- dropout,
- out_channels=out_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- )
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps):
- """
- Apply the model to an input batch.
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
-        :return: a dict mapping each feature map's spatial size (as a string) to an [N x C' x ...] feature Tensor.
- """
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
-
- result_list = []
- results = {}
- h = x.type(self.dtype)
- for module in self.input_blocks:
- last_h = h
- h = module(h, emb)
- if h.size(-1) != last_h.size(-1):
- result_list.append(last_h)
- h = self.middle_block(h, emb)
- result_list.append(h)
-
- assert len(result_list) == len(self.fea_tran)
-
- for i in range(len(result_list)):
- results[str(result_list[i].size(-1))] = self.fea_tran[i](result_list[i], emb)
-
- return results
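
Unlike a standard encoder head, the `forward` above returns a dict keyed by each feature map's spatial size. As a rough, standalone illustration (not part of the original file), the sketch below shows how such a dict might be indexed by the spatial size of a decoder activation; the tensor shapes and the `pick_struct_cond` helper are made up for this example.

```py
import torch

# Placeholder features keyed the same way as EncoderUNetModelWT.forward's output:
# str(spatial size) -> feature tensor (batch of 2, 256 channels, assumed shapes).
features = {str(s): torch.randn(2, 256, s, s) for s in (64, 32, 16, 8)}

def pick_struct_cond(features: dict, h: torch.Tensor) -> torch.Tensor:
    """Look up the feature map whose spatial size matches the activation `h`."""
    return features[str(h.size(-1))]

h = torch.randn(2, 256, 32, 32)             # a hypothetical decoder activation
print(pick_struct_cond(features, h).shape)  # torch.Size([2, 256, 32, 32])
```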
diff --git a/spaces/InpaintAI/Inpaint-Anything/utils/paste_object.py b/spaces/InpaintAI/Inpaint-Anything/utils/paste_object.py
deleted file mode 100644
index c4a4a040cfe5b7a9a76c40e4d0ca4a63cee51227..0000000000000000000000000000000000000000
--- a/spaces/InpaintAI/Inpaint-Anything/utils/paste_object.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import cv2
-import numpy as np
-
-def paste_object(source, source_mask, target, target_coords, resize_scale=1):
- assert target_coords[0] < target.shape[1] and target_coords[1] < target.shape[0]
- # Find the bounding box of the source_mask
- x, y, w, h = cv2.boundingRect(source_mask)
- assert h < source.shape[0] and w < source.shape[1]
- obj = source[y:y+h, x:x+w]
- obj_msk = source_mask[y:y+h, x:x+w]
- if resize_scale != 1:
- obj = cv2.resize(obj, (0,0), fx=resize_scale, fy=resize_scale)
- obj_msk = cv2.resize(obj_msk, (0,0), fx=resize_scale, fy=resize_scale)
- _, _, w, h = cv2.boundingRect(obj_msk)
-
- xt = max(0, target_coords[0]-w//2)
- yt = max(0, target_coords[1]-h//2)
- if target_coords[0]-w//2 < 0:
- obj = obj[:, w//2-target_coords[0]:]
- obj_msk = obj_msk[:, w//2-target_coords[0]:]
- if target_coords[0]+w//2 > target.shape[1]:
- obj = obj[:, :target.shape[1]-target_coords[0]+w//2]
- obj_msk = obj_msk[:, :target.shape[1]-target_coords[0]+w//2]
- if target_coords[1]-h//2 < 0:
- obj = obj[h//2-target_coords[1]:, :]
- obj_msk = obj_msk[h//2-target_coords[1]:, :]
- if target_coords[1]+h//2 > target.shape[0]:
- obj = obj[:target.shape[0]-target_coords[1]+h//2, :]
- obj_msk = obj_msk[:target.shape[0]-target_coords[1]+h//2, :]
- _, _, w, h = cv2.boundingRect(obj_msk)
-
- target[yt:yt+h, xt:xt+w][obj_msk==255] = obj[obj_msk==255]
- target_mask = np.zeros_like(target)
- target_mask = cv2.cvtColor(target_mask, cv2.COLOR_BGR2GRAY)
- target_mask[yt:yt+h, xt:xt+w][obj_msk==255] = 255
-
- return target, target_mask
-
-if __name__ == '__main__':
- source = cv2.imread('example/boat.jpg')
- source_mask = cv2.imread('example/boat_mask_1.png', 0)
- target = cv2.imread('example/hippopotamus.jpg')
- print(source.shape, source_mask.shape, target.shape)
-
- target_coords = (700, 400) # (x, y)
- resize_scale = 1
- target, target_mask = paste_object(source, source_mask, target, target_coords, resize_scale)
- cv2.imwrite('target_pasted.png', target)
- cv2.imwrite('target_mask.png', target_mask)
- print(target.shape, target_mask.shape)
\ No newline at end of file
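
The `__main__` block above depends on example images that may not be present. A minimal, self-contained sketch of the same call using synthetic arrays (all values here are made up), assuming `paste_object` from the file above is importable:

```py
import cv2
import numpy as np

# Synthetic source: a white 30x30 square with a matching binary mask.
source = np.zeros((100, 100, 3), dtype=np.uint8)
source[30:60, 30:60] = 255
source_mask = np.zeros((100, 100), dtype=np.uint8)
source_mask[30:60, 30:60] = 255
target = np.zeros((200, 200, 3), dtype=np.uint8)

# Paste the square centered at (100, 100) in the target.
pasted, pasted_mask = paste_object(source, source_mask, target, target_coords=(100, 100))
print(pasted.shape, pasted_mask.shape)  # (200, 200, 3) (200, 200)
```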
diff --git a/spaces/Jamkonams/AutoGPT/autogpt/config/ai_config.py b/spaces/Jamkonams/AutoGPT/autogpt/config/ai_config.py
deleted file mode 100644
index d50c30beee9dc8009f63415378ae1c6a399f0037..0000000000000000000000000000000000000000
--- a/spaces/Jamkonams/AutoGPT/autogpt/config/ai_config.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# sourcery skip: do-not-use-staticmethod
-"""
-A module that contains the AIConfig class, which holds the AI's configuration.
-"""
-from __future__ import annotations
-
-import os
-from typing import Type
-
-import yaml
-
-
-class AIConfig:
- """
- A class object that contains the configuration information for the AI
-
- Attributes:
- ai_name (str): The name of the AI.
- ai_role (str): The description of the AI's role.
- ai_goals (list): The list of objectives the AI is supposed to complete.
- """
-
- def __init__(
- self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None
- ) -> None:
- """
- Initialize a class instance
-
- Parameters:
- ai_name (str): The name of the AI.
- ai_role (str): The description of the AI's role.
- ai_goals (list): The list of objectives the AI is supposed to complete.
- Returns:
- None
- """
- if ai_goals is None:
- ai_goals = []
- self.ai_name = ai_name
- self.ai_role = ai_role
- self.ai_goals = ai_goals
-
- # Soon this will go in a folder where it remembers more stuff about the run(s)
- SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")
-
- @staticmethod
- def load(config_file: str = SAVE_FILE) -> "AIConfig":
- """
-        Returns an AIConfig instance with parameters (ai_name, ai_role, ai_goals)
-        loaded from the yaml file if it exists; otherwise returns an instance with
-        empty default parameters.
-
-        Parameters:
-            config_file (str): The path to the config yaml file.
-              DEFAULT: "../ai_settings.yaml"
-
-        Returns:
-            AIConfig: An instance of AIConfig populated from the file.
- """
-
- try:
- with open(config_file, encoding="utf-8") as file:
- config_params = yaml.load(file, Loader=yaml.FullLoader)
- except FileNotFoundError:
- config_params = {}
-
- ai_name = config_params.get("ai_name", "")
- ai_role = config_params.get("ai_role", "")
- ai_goals = config_params.get("ai_goals", [])
- # type: Type[AIConfig]
- return AIConfig(ai_name, ai_role, ai_goals)
-
- def save(self, config_file: str = SAVE_FILE) -> None:
- """
-        Saves the class parameters as a yaml file at the specified path.
-
- Parameters:
- config_file(str): The path to the config yaml file.
- DEFAULT: "../ai_settings.yaml"
-
- Returns:
- None
- """
-
- config = {
- "ai_name": self.ai_name,
- "ai_role": self.ai_role,
- "ai_goals": self.ai_goals,
- }
- with open(config_file, "w", encoding="utf-8") as file:
- yaml.dump(config, file, allow_unicode=True)
-
- def construct_full_prompt(self) -> str:
- """
- Returns a prompt to the user with the class information in an organized fashion.
-
- Parameters:
- None
-
- Returns:
- full_prompt (str): A string containing the initial prompt for the user
- including the ai_name, ai_role and ai_goals.
- """
-
- prompt_start = (
- "Your decisions must always be made independently without"
- " seeking user assistance. Play to your strengths as an LLM and pursue"
- " simple strategies with no legal complications."
- ""
- )
-
- from autogpt.prompt import get_prompt
-
- # Construct full prompt
- full_prompt = (
- f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
- )
- for i, goal in enumerate(self.ai_goals):
- full_prompt += f"{i+1}. {goal}\n"
-
- full_prompt += f"\n\n{get_prompt()}"
- return full_prompt
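
A rough save/load round-trip sketch for `AIConfig`; the import path mirrors the file's location in this Space and the field values are made up.

```py
import os
import tempfile

from autogpt.config.ai_config import AIConfig  # assumed import path

cfg = AIConfig(ai_name="DemoGPT", ai_role="a demo agent", ai_goals=["say hello"])

path = os.path.join(tempfile.mkdtemp(), "ai_settings.yaml")
cfg.save(path)                # dumps ai_name / ai_role / ai_goals as YAML
loaded = AIConfig.load(path)  # a missing file would instead yield empty defaults
print(loaded.ai_name, loaded.ai_goals)  # DemoGPT ['say hello']
```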
diff --git a/spaces/Jamos1/AI_gamer89-insta/README.md b/spaces/Jamos1/AI_gamer89-insta/README.md
deleted file mode 100644
index f30d4256155c480f0599698379f798a3365e5bc1..0000000000000000000000000000000000000000
--- a/spaces/Jamos1/AI_gamer89-insta/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Youtube Whisperer
-emoji: ⚡
-colorFrom: purple
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-duplicated_from: jeffistyping/Youtube-Whisperer
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Jeff2323/ai-comic-factory/src/lib/computeSha256.ts b/spaces/Jeff2323/ai-comic-factory/src/lib/computeSha256.ts
deleted file mode 100644
index cb6ef0604fca9653408012fd6cef2a58b6acaf47..0000000000000000000000000000000000000000
--- a/spaces/Jeff2323/ai-comic-factory/src/lib/computeSha256.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-import { createHash } from 'node:crypto'
-
-/**
- * Returns a SHA3-256 hash (hex-encoded) of the given `strContent`.
- *
- * @see https://en.wikipedia.org/wiki/SHA-3
- *
- * @param {String} strContent
- *
- * @returns {String}
- */
-export function computeSha256(strContent: string) {
- return createHash('sha3-256').update(strContent).digest('hex')
-}
\ No newline at end of file
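
Note that the helper hashes with SHA3-256, not the more common SHA-256, so its output will not match a plain SHA-256 digest of the same input. A small Python comparison, independent of the file above:

```py
import hashlib

content = b"hello"
# SHA3-256 (what createHash('sha3-256') computes) vs. SHA-256: different algorithms,
# different digests for the same input.
print(hashlib.sha3_256(content).hexdigest())
print(hashlib.sha256(content).hexdigest())
```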
diff --git a/spaces/Joeythemonster/magic-diffusion/README.md b/spaces/Joeythemonster/magic-diffusion/README.md
deleted file mode 100644
index 18fae13e602dafc9509de23e20d0f7a7d7272cb6..0000000000000000000000000000000000000000
--- a/spaces/Joeythemonster/magic-diffusion/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Magic Prompt
-emoji: 🎆
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: huggingface-projects/magic-diffusion
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Jonni/01-3DModel_Gradio/files/readme.md b/spaces/Jonni/01-3DModel_Gradio/files/readme.md
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Jour/Translation-to-small/README.md b/spaces/Jour/Translation-to-small/README.md
deleted file mode 100644
index d9f10f6ba905575ad1b62f9ffe3a8d0ff6b9acd0..0000000000000000000000000000000000000000
--- a/spaces/Jour/Translation-to-small/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Translate With Bloom
-emoji: 🐠
-colorFrom: yellow
-colorTo: pink
-sdk: gradio
-sdk_version: 3.0.26
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: Jour/Translate
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/utils/visutil.py b/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/utils/visutil.py
deleted file mode 100644
index db34f52536d3176447e74ef75c5ab19ec62efde9..0000000000000000000000000000000000000000
--- a/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/utils/visutil.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import numpy as np
-import trimesh
-import matplotlib.pyplot as plt
-####
-# TODO: lift the dependency on fresnelvis
-# NOTE: fresnelvis is still used by the render_* functions below, so keep importing it for now.
-from salad.utils import nputil, thutil, fresnelvis
-####
-from PIL import Image
-
-
-def render_pointcloud(
- pointcloud,
- camPos=np.array([-2, 2, -2]),
- camLookat=np.array([0.0, 0.0, 0.0]),
- camUp=np.array([0, 1, 0]),
- camHeight=2,
- resolution=(512, 512),
- samples=16,
- cloudR=0.006,
-):
- pointcloud = thutil.th2np(pointcloud)
- img = fresnelvis.renderMeshCloud(
- cloud=pointcloud,
- camPos=camPos,
- camLookat=camLookat,
- camUp=camUp,
- camHeight=camHeight,
- resolution=resolution,
- samples=samples,
- cloudR=cloudR,
- )
- return Image.fromarray(img)
-
-
-def render_mesh(
- vert,
- face,
- camPos=np.array([-2, 2, -2]),
- camLookat=np.array([0, 0, 0.0]),
- camUp=np.array([0, 1, 0]),
- camHeight=2,
- resolution=(512, 512),
- samples=16,
-):
- vert, face = list(map(lambda x: thutil.th2np(x), [vert, face]))
- mesh = {"vert": vert, "face": face}
- img = fresnelvis.renderMeshCloud(
- mesh=mesh,
- camPos=camPos,
- camLookat=camLookat,
- camUp=camUp,
- camHeight=camHeight,
- resolution=resolution,
- samples=samples,
- )
- return Image.fromarray(img)
-
-
-def render_gaussians(
- gaussians,
- is_bspnet=False,
- multiplier=1.0,
- gaussians_colors=None,
- attn_map=None,
- camPos=np.array([-2, 2, -2]),
- camLookat=np.array([0.0, 0, 0]),
- camUp=np.array([0, 1, 0]),
- camHeight=2,
- resolution=(512, 512),
- samples=16,
-):
- gaussians = thutil.th2np(gaussians)
- N = gaussians.shape[0]
- cmap = plt.get_cmap("jet")
-
- if attn_map is not None:
- assert N == attn_map.shape[0]
- vmin, vmax = attn_map.min(), attn_map.max()
- if vmin == vmax:
- normalized_attn_map = np.zeros_like(attn_map)
- else:
- normalized_attn_map = (attn_map - vmin) / (vmax - vmin)
-
- cmap = plt.get_cmap("viridis")
-
- lights = "rembrandt"
- camera_kwargs = dict(
- camPos=camPos,
- camLookat=camLookat,
- camUp=camUp,
- camHeight=camHeight,
- resolution=resolution,
- samples=samples,
- )
- renderer = fresnelvis.FresnelRenderer(lights=lights, camera_kwargs=camera_kwargs)
- for i, g in enumerate(gaussians):
- if is_bspnet:
- mu, eival, eivec = g[:3], g[3:6], g[6:15]
- else:
- mu, eivec, eival = g[:3], g[3:12], g[13:]
- R = eivec.reshape(3, 3).T
- scale = multiplier * np.sqrt(eival)
- scale_transform = np.diag((*scale, 1))
- rigid_transform = np.hstack((R, mu.reshape(3, 1)))
- rigid_transform = np.vstack((rigid_transform, [0, 0, 0, 1]))
- sphere = trimesh.creation.icosphere()
- sphere.apply_transform(scale_transform)
- sphere.apply_transform(rigid_transform)
- if attn_map is None and gaussians_colors is None:
- color = np.array(cmap(i / N)[:3])
- elif attn_map is not None:
- color = np.array(cmap(normalized_attn_map[i])[:3])
- else:
- color = gaussians_colors[i]
-
- renderer.add_mesh(
- sphere.vertices, sphere.faces, color=color, outline_width=None
- )
- image = renderer.render()
- return Image.fromarray(image)
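
The per-Gaussian transform inside `render_gaussians` (scale a unit sphere by the square roots of the eigenvalues, rotate by the eigenvectors, translate by the mean) can be exercised on its own. A numpy-only sketch with made-up parameter values:

```py
import numpy as np

mu = np.array([0.1, 0.2, 0.3])          # ellipsoid center (made up)
eivec = np.eye(3).reshape(-1)           # flattened 3x3 eigenvector matrix
eival = np.array([0.04, 0.01, 0.0025])  # eigenvalues, i.e. variances along the axes

R = eivec.reshape(3, 3).T
scale_transform = np.diag((*np.sqrt(eival), 1.0))   # std-devs become the radii
rigid_transform = np.vstack((np.hstack((R, mu.reshape(3, 1))), [0, 0, 0, 1]))

# Applying scale first, then the rigid transform, matches the two apply_transform calls above.
affine = rigid_transform @ scale_transform
print(affine.round(3))
```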
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/easy_infer.py b/spaces/Kangarroar/ApplioRVC-Inference/easy_infer.py
deleted file mode 100644
index 5f32bc37e6702cd7288df66059d4a09bc0e3d6aa..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/easy_infer.py
+++ /dev/null
@@ -1,1398 +0,0 @@
-import subprocess
-import os
-import sys
-import errno
-import shutil
-import yt_dlp
-from mega import Mega
-import datetime
-import unicodedata
-import torch
-import glob
-import gradio as gr
-import gdown
-import zipfile
-import traceback
-import json
-import mdx
-from mdx_processing_script import get_model_list,id_to_ptm,prepare_mdx,run_mdx
-import requests
-import wget
-import ffmpeg
-import hashlib
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-from unidecode import unidecode
-import re
-import time
-from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
-from infer.modules.vc.pipeline import Pipeline
-VC = Pipeline
-from lib.infer_pack.models import (
- SynthesizerTrnMs256NSFsid,
- SynthesizerTrnMs256NSFsid_nono,
- SynthesizerTrnMs768NSFsid,
- SynthesizerTrnMs768NSFsid_nono,
-)
-from MDXNet import MDXNetDereverb
-from configs.config import Config
-from infer_uvr5 import _audio_pre_, _audio_pre_new
-from huggingface_hub import HfApi, list_models
-from huggingface_hub import login
-from i18n import I18nAuto
-i18n = I18nAuto()
-from bs4 import BeautifulSoup
-from sklearn.cluster import MiniBatchKMeans
-from dotenv import load_dotenv
-load_dotenv()
-config = Config()
-tmp = os.path.join(now_dir, "TEMP")
-shutil.rmtree(tmp, ignore_errors=True)
-os.environ["TEMP"] = tmp
-weight_root = os.getenv("weight_root")
-weight_uvr5_root = os.getenv("weight_uvr5_root")
-index_root = os.getenv("index_root")
-audio_root = "audios"
-names = []
-for name in os.listdir(weight_root):
- if name.endswith(".pth"):
- names.append(name)
-index_paths = []
-
-global indexes_list
-indexes_list = []
-
-audio_paths = []
-for root, dirs, files in os.walk(index_root, topdown=False):
- for name in files:
- if name.endswith(".index") and "trained" not in name:
- index_paths.append("%s\\%s" % (root, name))
-
-for root, dirs, files in os.walk(audio_root, topdown=False):
- for name in files:
- audio_paths.append("%s/%s" % (root, name))
-
-uvr5_names = []
-for name in os.listdir(weight_uvr5_root):
- if name.endswith(".pth") or "onnx" in name:
- uvr5_names.append(name.replace(".pth", ""))
-
-def calculate_md5(file_path):
- hash_md5 = hashlib.md5()
- with open(file_path, "rb") as f:
- for chunk in iter(lambda: f.read(4096), b""):
- hash_md5.update(chunk)
- return hash_md5.hexdigest()
-
-def format_title(title):
- formatted_title = re.sub(r'[^\w\s-]', '', title)
- formatted_title = formatted_title.replace(" ", "_")
- return formatted_title
-
-def silentremove(filename):
- try:
- os.remove(filename)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
-def get_md5(temp_folder):
- for root, subfolders, files in os.walk(temp_folder):
- for file in files:
- if not file.startswith("G_") and not file.startswith("D_") and file.endswith(".pth") and not "_G_" in file and not "_D_" in file:
- md5_hash = calculate_md5(os.path.join(root, file))
- return md5_hash
-
- return None
-
-def find_parent(search_dir, file_name):
- for dirpath, dirnames, filenames in os.walk(search_dir):
- if file_name in filenames:
- return os.path.abspath(dirpath)
- return None
-
-def find_folder_parent(search_dir, folder_name):
- for dirpath, dirnames, filenames in os.walk(search_dir):
- if folder_name in dirnames:
- return os.path.abspath(dirpath)
- return None
-
-
-def delete_large_files(directory_path, max_size_megabytes):
- for filename in os.listdir(directory_path):
- file_path = os.path.join(directory_path, filename)
- if os.path.isfile(file_path):
- size_in_bytes = os.path.getsize(file_path)
- size_in_megabytes = size_in_bytes / (1024 * 1024) # Convert bytes to megabytes
-
- if size_in_megabytes > max_size_megabytes:
- print("###################################")
-                print(f"Deleting oversized file {filename} (Size: {size_in_megabytes:.2f} MB)")
- os.remove(file_path)
- print("###################################")
-
-def download_from_url(url):
- parent_path = find_folder_parent(".", "pretrained_v2")
- zips_path = os.path.join(parent_path, 'zips')
-    print(f"Download size limit in MB: {os.getenv('MAX_DOWNLOAD_SIZE')}; duplicate the Space to modify the limit")
-
- if url != '':
- print(i18n("Downloading the file: ") + f"{url}")
- if "drive.google.com" in url:
- if "file/d/" in url:
- file_id = url.split("file/d/")[1].split("/")[0]
- elif "id=" in url:
- file_id = url.split("id=")[1].split("&")[0]
- else:
- return None
-
- if file_id:
- os.chdir('./zips')
- result = subprocess.run(["gdown", f"https://drive.google.com/uc?id={file_id}", "--fuzzy"], capture_output=True, text=True, encoding='utf-8')
- if "Too many users have viewed or downloaded this file recently" in str(result.stderr):
- return "too much use"
- if "Cannot retrieve the public link of the file." in str(result.stderr):
- return "private link"
- print(result.stderr)
-
- elif "/blob/" in url:
- os.chdir('./zips')
- url = url.replace("blob", "resolve")
- response = requests.get(url)
- if response.status_code == 200:
- file_name = url.split('/')[-1]
- with open(os.path.join(zips_path, file_name), "wb") as newfile:
- newfile.write(response.content)
- else:
- os.chdir(parent_path)
- elif "mega.nz" in url:
- if "#!" in url:
- file_id = url.split("#!")[1].split("!")[0]
- elif "file/" in url:
- file_id = url.split("file/")[1].split("/")[0]
- else:
- return None
- if file_id:
- m = Mega()
- m.download_url(url, zips_path)
- elif "/tree/main" in url:
- response = requests.get(url)
- soup = BeautifulSoup(response.content, 'html.parser')
- temp_url = ''
- for link in soup.find_all('a', href=True):
- if link['href'].endswith('.zip'):
- temp_url = link['href']
- break
- if temp_url:
- url = temp_url
- url = url.replace("blob", "resolve")
- if "huggingface.co" not in url:
- url = "https://huggingface.co" + url
-
- wget.download(url)
- else:
- print("No .zip file found on the page.")
- elif "cdn.discordapp.com" in url:
- file = requests.get(url)
- if file.status_code == 200:
- name = url.split('/')
- with open(os.path.join(zips_path, name[len(name)-1]), "wb") as newfile:
- newfile.write(file.content)
- else:
- return None
- elif "pixeldrain.com" in url:
- try:
- file_id = url.split("pixeldrain.com/u/")[1]
- os.chdir('./zips')
- print(file_id)
- response = requests.get(f"https://pixeldrain.com/api/file/{file_id}")
- if response.status_code == 200:
- file_name = response.headers.get("Content-Disposition").split('filename=')[-1].strip('";')
- if not os.path.exists(zips_path):
- os.makedirs(zips_path)
- with open(os.path.join(zips_path, file_name), "wb") as newfile:
- newfile.write(response.content)
- os.chdir(parent_path)
- return "downloaded"
- else:
- os.chdir(parent_path)
- return None
- except Exception as e:
- print(e)
- os.chdir(parent_path)
- return None
- else:
- os.chdir('./zips')
- wget.download(url)
-
- #os.chdir('./zips')
- delete_large_files(zips_path, int(os.getenv("MAX_DOWNLOAD_SIZE")))
- os.chdir(parent_path)
- print(i18n("Full download"))
- return "downloaded"
- else:
- return None
-
-class error_message(Exception):
- def __init__(self, mensaje):
- self.mensaje = mensaje
- super().__init__(mensaje)
-
-def get_vc(sid, to_return_protect0, to_return_protect1):
- global n_spk, tgt_sr, net_g, vc, cpt, version
- if sid == "" or sid == []:
- global hubert_model
- if hubert_model is not None:
- print("clean_empty_cache")
- del net_g, n_spk, vc, hubert_model, tgt_sr
- hubert_model = net_g = n_spk = vc = hubert_model = tgt_sr = None
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- if_f0 = cpt.get("f0", 1)
- version = cpt.get("version", "v1")
- if version == "v1":
- if if_f0 == 1:
- net_g = SynthesizerTrnMs256NSFsid(
- *cpt["config"], is_half=config.is_half
- )
- else:
- net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
- elif version == "v2":
- if if_f0 == 1:
- net_g = SynthesizerTrnMs768NSFsid(
- *cpt["config"], is_half=config.is_half
- )
- else:
- net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
- del net_g, cpt
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- cpt = None
- return (
- {"visible": False, "__type__": "update"},
- {"visible": False, "__type__": "update"},
- {"visible": False, "__type__": "update"},
- )
- person = "%s/%s" % (weight_root, sid)
- print("loading %s" % person)
- cpt = torch.load(person, map_location="cpu")
- tgt_sr = cpt["config"][-1]
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
- if_f0 = cpt.get("f0", 1)
- if if_f0 == 0:
- to_return_protect0 = to_return_protect1 = {
- "visible": False,
- "value": 0.5,
- "__type__": "update",
- }
- else:
- to_return_protect0 = {
- "visible": True,
- "value": to_return_protect0,
- "__type__": "update",
- }
- to_return_protect1 = {
- "visible": True,
- "value": to_return_protect1,
- "__type__": "update",
- }
- version = cpt.get("version", "v1")
- if version == "v1":
- if if_f0 == 1:
- net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
- else:
- net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
- elif version == "v2":
- if if_f0 == 1:
- net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
- else:
- net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
- del net_g.enc_q
- print(net_g.load_state_dict(cpt["weight"], strict=False))
- net_g.eval().to(config.device)
- if config.is_half:
- net_g = net_g.half()
- else:
- net_g = net_g.float()
- vc = VC(tgt_sr, config)
- n_spk = cpt["config"][-3]
- return (
- {"visible": True, "maximum": n_spk, "__type__": "update"},
- to_return_protect0,
- to_return_protect1,
- )
-
-def load_downloaded_model(url):
- parent_path = find_folder_parent(".", "pretrained_v2")
- try:
- infos = []
- logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768']
- zips_path = os.path.join(parent_path, 'zips')
- unzips_path = os.path.join(parent_path, 'unzips')
- weights_path = os.path.join(parent_path, 'weights')
- logs_dir = ""
-
- if os.path.exists(zips_path):
- shutil.rmtree(zips_path)
- if os.path.exists(unzips_path):
- shutil.rmtree(unzips_path)
-
- os.mkdir(zips_path)
- os.mkdir(unzips_path)
-
- download_file = download_from_url(url)
- if not download_file:
- print(i18n("The file could not be downloaded."))
- infos.append(i18n("The file could not be downloaded."))
- yield "\n".join(infos)
- elif download_file == "downloaded":
- print(i18n("It has been downloaded successfully."))
- infos.append(i18n("It has been downloaded successfully."))
- yield "\n".join(infos)
- elif download_file == "too much use":
- raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
- elif download_file == "private link":
- raise Exception(i18n("Cannot get file from this private link"))
-
- for filename in os.listdir(zips_path):
- if filename.endswith(".zip"):
- zipfile_path = os.path.join(zips_path,filename)
- print(i18n("Proceeding with the extraction..."))
- infos.append(i18n("Proceeding with the extraction..."))
- shutil.unpack_archive(zipfile_path, unzips_path, 'zip')
- model_name = os.path.basename(zipfile_path)
- logs_dir = os.path.join(parent_path,'logs', os.path.normpath(str(model_name).replace(".zip","")))
- yield "\n".join(infos)
- else:
- print(i18n("Unzip error."))
- infos.append(i18n("Unzip error."))
- yield "\n".join(infos)
-
- index_file = False
- model_file = False
- D_file = False
- G_file = False
-
- for path, subdirs, files in os.walk(unzips_path):
- for item in files:
- item_path = os.path.join(path, item)
- if not 'G_' in item and not 'D_' in item and item.endswith('.pth'):
- model_file = True
- model_name = item.replace(".pth","")
- logs_dir = os.path.join(parent_path,'logs', model_name)
- if os.path.exists(logs_dir):
- shutil.rmtree(logs_dir)
- os.mkdir(logs_dir)
- if not os.path.exists(weights_path):
- os.mkdir(weights_path)
- if os.path.exists(os.path.join(weights_path, item)):
- os.remove(os.path.join(weights_path, item))
- if os.path.exists(item_path):
- shutil.move(item_path, weights_path)
-
- if not model_file and not os.path.exists(logs_dir):
- os.mkdir(logs_dir)
- for path, subdirs, files in os.walk(unzips_path):
- for item in files:
- item_path = os.path.join(path, item)
- if item.startswith('added_') and item.endswith('.index'):
- index_file = True
- if os.path.exists(item_path):
- if os.path.exists(os.path.join(logs_dir, item)):
- os.remove(os.path.join(logs_dir, item))
- shutil.move(item_path, logs_dir)
- if item.startswith('total_fea.npy') or item.startswith('events.'):
- if os.path.exists(item_path):
- if os.path.exists(os.path.join(logs_dir, item)):
- os.remove(os.path.join(logs_dir, item))
- shutil.move(item_path, logs_dir)
-
-
- result = ""
- if model_file:
- if index_file:
- print(i18n("The model works for inference, and has the .index file."))
- infos.append("\n" + i18n("The model works for inference, and has the .index file."))
- yield "\n".join(infos)
- else:
- print(i18n("The model works for inference, but it doesn't have the .index file."))
- infos.append("\n" + i18n("The model works for inference, but it doesn't have the .index file."))
- yield "\n".join(infos)
-
- if not index_file and not model_file:
- print(i18n("No relevant file was found to upload."))
- infos.append(i18n("No relevant file was found to upload."))
- yield "\n".join(infos)
-
- if os.path.exists(zips_path):
- shutil.rmtree(zips_path)
- if os.path.exists(unzips_path):
- shutil.rmtree(unzips_path)
- os.chdir(parent_path)
- return result
- except Exception as e:
- os.chdir(parent_path)
- if "too much use" in str(e):
- print(i18n("Too many users have recently viewed or downloaded this file"))
- yield i18n("Too many users have recently viewed or downloaded this file")
- elif "private link" in str(e):
- print(i18n("Cannot get file from this private link"))
- yield i18n("Cannot get file from this private link")
- else:
- print(e)
- yield i18n("An error occurred downloading")
- finally:
- os.chdir(parent_path)
-
-def load_dowloaded_dataset(url):
- parent_path = find_folder_parent(".", "pretrained_v2")
- infos = []
- try:
- zips_path = os.path.join(parent_path, 'zips')
- unzips_path = os.path.join(parent_path, 'unzips')
- datasets_path = os.path.join(parent_path, 'datasets')
- audio_extenions =['wav', 'mp3', 'flac', 'ogg', 'opus',
- 'm4a', 'mp4', 'aac', 'alac', 'wma',
- 'aiff', 'webm', 'ac3']
-
- if os.path.exists(zips_path):
- shutil.rmtree(zips_path)
- if os.path.exists(unzips_path):
- shutil.rmtree(unzips_path)
-
- if not os.path.exists(datasets_path):
- os.mkdir(datasets_path)
-
- os.mkdir(zips_path)
- os.mkdir(unzips_path)
-
- download_file = download_from_url(url)
-
- if not download_file:
- print(i18n("An error occurred downloading"))
- infos.append(i18n("An error occurred downloading"))
- yield "\n".join(infos)
- raise Exception(i18n("An error occurred downloading"))
- elif download_file == "downloaded":
- print(i18n("It has been downloaded successfully."))
- infos.append(i18n("It has been downloaded successfully."))
- yield "\n".join(infos)
- elif download_file == "too much use":
- raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
- elif download_file == "private link":
- raise Exception(i18n("Cannot get file from this private link"))
-
- zip_path = os.listdir(zips_path)
- foldername = ""
- for file in zip_path:
- if file.endswith('.zip'):
- file_path = os.path.join(zips_path, file)
- print("....")
- foldername = file.replace(".zip","").replace(" ","").replace("-","_")
- dataset_path = os.path.join(datasets_path, foldername)
- print(i18n("Proceeding with the extraction..."))
- infos.append(i18n("Proceeding with the extraction..."))
- yield "\n".join(infos)
- shutil.unpack_archive(file_path, unzips_path, 'zip')
- if os.path.exists(dataset_path):
- shutil.rmtree(dataset_path)
-
- os.mkdir(dataset_path)
-
- for root, subfolders, songs in os.walk(unzips_path):
- for song in songs:
- song_path = os.path.join(root, song)
- if song.endswith(tuple(audio_extenions)):
- formatted_song_name = format_title(os.path.splitext(song)[0])
- extension = os.path.splitext(song)[1]
- new_song_path = os.path.join(dataset_path, f"{formatted_song_name}{extension}")
- shutil.move(song_path, new_song_path)
- else:
- print(i18n("Unzip error."))
- infos.append(i18n("Unzip error."))
- yield "\n".join(infos)
-
-
-
- if os.path.exists(zips_path):
- shutil.rmtree(zips_path)
- if os.path.exists(unzips_path):
- shutil.rmtree(unzips_path)
-
- print(i18n("The Dataset has been loaded successfully."))
- infos.append(i18n("The Dataset has been loaded successfully."))
- yield "\n".join(infos)
- except Exception as e:
- os.chdir(parent_path)
- if "too much use" in str(e):
- print(i18n("Too many users have recently viewed or downloaded this file"))
- yield i18n("Too many users have recently viewed or downloaded this file")
- elif "private link" in str(e):
- print(i18n("Cannot get file from this private link"))
- yield i18n("Cannot get file from this private link")
- else:
- print(e)
- yield i18n("An error occurred downloading")
- finally:
- os.chdir(parent_path)
-
-def save_model(modelname, save_action):
-
- parent_path = find_folder_parent(".", "pretrained_v2")
- zips_path = os.path.join(parent_path, 'zips')
- dst = os.path.join(zips_path,modelname)
- logs_path = os.path.join(parent_path, 'logs', modelname)
- weights_path = os.path.join(parent_path, 'weights', f"{modelname}.pth")
- save_folder = parent_path
- infos = []
-
- try:
- if not os.path.exists(logs_path):
- raise Exception("No model found.")
-
- if not 'content' in parent_path:
- save_folder = os.path.join(parent_path, 'RVC_Backup')
- else:
- save_folder = '/content/drive/MyDrive/RVC_Backup'
-
- infos.append(i18n("Save model"))
- yield "\n".join(infos)
-
- if not os.path.exists(save_folder):
- os.mkdir(save_folder)
- if not os.path.exists(os.path.join(save_folder, 'ManualTrainingBackup')):
- os.mkdir(os.path.join(save_folder, 'ManualTrainingBackup'))
- if not os.path.exists(os.path.join(save_folder, 'Finished')):
- os.mkdir(os.path.join(save_folder, 'Finished'))
-
- if os.path.exists(zips_path):
- shutil.rmtree(zips_path)
-
- os.mkdir(zips_path)
- added_file = glob.glob(os.path.join(logs_path, "added_*.index"))
- d_file = glob.glob(os.path.join(logs_path, "D_*.pth"))
- g_file = glob.glob(os.path.join(logs_path, "G_*.pth"))
-
- if save_action == i18n("Choose the method"):
-            raise Exception("No method chosen.")
-
- if save_action == i18n("Save all"):
- print(i18n("Save all"))
- save_folder = os.path.join(save_folder, 'ManualTrainingBackup')
- shutil.copytree(logs_path, dst)
- else:
- if not os.path.exists(dst):
- os.mkdir(dst)
-
- if save_action == i18n("Save D and G"):
- print(i18n("Save D and G"))
- save_folder = os.path.join(save_folder, 'ManualTrainingBackup')
- if len(d_file) > 0:
- shutil.copy(d_file[0], dst)
- if len(g_file) > 0:
- shutil.copy(g_file[0], dst)
-
- if len(added_file) > 0:
- shutil.copy(added_file[0], dst)
- else:
- infos.append(i18n("Saved without index..."))
-
- if save_action == i18n("Save voice"):
- print(i18n("Save voice"))
- save_folder = os.path.join(save_folder, 'Finished')
- if len(added_file) > 0:
- shutil.copy(added_file[0], dst)
- else:
- infos.append(i18n("Saved without index..."))
-
- yield "\n".join(infos)
- if not os.path.exists(weights_path):
- infos.append(i18n("Saved without inference model..."))
- else:
- shutil.copy(weights_path, dst)
-
- yield "\n".join(infos)
- infos.append("\n" + i18n("This may take a few minutes, please wait..."))
- yield "\n".join(infos)
-
- shutil.make_archive(os.path.join(zips_path,f"{modelname}"), 'zip', zips_path)
- shutil.move(os.path.join(zips_path,f"{modelname}.zip"), os.path.join(save_folder, f'{modelname}.zip'))
-
- shutil.rmtree(zips_path)
- infos.append("\n" + i18n("Model saved successfully"))
- yield "\n".join(infos)
-
- except Exception as e:
- print(e)
- if "No model found." in str(e):
- infos.append(i18n("The model you want to save does not exist, be sure to enter the correct name."))
- else:
- infos.append(i18n("An error occurred saving the model"))
-
- yield "\n".join(infos)
-
-def load_downloaded_backup(url):
- parent_path = find_folder_parent(".", "pretrained_v2")
- try:
- infos = []
- logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768']
- zips_path = os.path.join(parent_path, 'zips')
- unzips_path = os.path.join(parent_path, 'unzips')
- weights_path = os.path.join(parent_path, 'weights')
- logs_dir = os.path.join(parent_path, 'logs')
-
- if os.path.exists(zips_path):
- shutil.rmtree(zips_path)
- if os.path.exists(unzips_path):
- shutil.rmtree(unzips_path)
-
- os.mkdir(zips_path)
- os.mkdir(unzips_path)
-
- download_file = download_from_url(url)
- if not download_file:
- print(i18n("The file could not be downloaded."))
- infos.append(i18n("The file could not be downloaded."))
- yield "\n".join(infos)
- elif download_file == "downloaded":
- print(i18n("It has been downloaded successfully."))
- infos.append(i18n("It has been downloaded successfully."))
- yield "\n".join(infos)
- elif download_file == "too much use":
- raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
- elif download_file == "private link":
- raise Exception(i18n("Cannot get file from this private link"))
-
- for filename in os.listdir(zips_path):
- if filename.endswith(".zip"):
- zipfile_path = os.path.join(zips_path,filename)
- zip_dir_name = os.path.splitext(filename)[0]
- unzip_dir = unzips_path
- print(i18n("Proceeding with the extraction..."))
- infos.append(i18n("Proceeding with the extraction..."))
- shutil.unpack_archive(zipfile_path, unzip_dir, 'zip')
-
- if os.path.exists(os.path.join(unzip_dir, zip_dir_name)):
- shutil.move(os.path.join(unzip_dir, zip_dir_name), logs_dir)
- else:
- new_folder_path = os.path.join(logs_dir, zip_dir_name)
- os.mkdir(new_folder_path)
- for item_name in os.listdir(unzip_dir):
- item_path = os.path.join(unzip_dir, item_name)
- if os.path.isfile(item_path):
- shutil.move(item_path, new_folder_path)
- elif os.path.isdir(item_path):
- shutil.move(item_path, new_folder_path)
-
- yield "\n".join(infos)
- else:
- print(i18n("Unzip error."))
- infos.append(i18n("Unzip error."))
- yield "\n".join(infos)
-
- result = ""
-
- for filename in os.listdir(unzips_path):
- if filename.endswith(".zip"):
- silentremove(filename)
-
- if os.path.exists(zips_path):
- shutil.rmtree(zips_path)
- if os.path.exists(os.path.join(parent_path, 'unzips')):
- shutil.rmtree(os.path.join(parent_path, 'unzips'))
- print(i18n("The Backup has been uploaded successfully."))
- infos.append("\n" + i18n("The Backup has been uploaded successfully."))
- yield "\n".join(infos)
- os.chdir(parent_path)
- return result
- except Exception as e:
- os.chdir(parent_path)
- if "too much use" in str(e):
- print(i18n("Too many users have recently viewed or downloaded this file"))
- yield i18n("Too many users have recently viewed or downloaded this file")
- elif "private link" in str(e):
- print(i18n("Cannot get file from this private link"))
- yield i18n("Cannot get file from this private link")
- else:
- print(e)
- yield i18n("An error occurred downloading")
- finally:
- os.chdir(parent_path)
-
-def save_to_wav(record_button):
- if record_button is None:
- pass
- else:
- path_to_file=record_button
- new_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'.wav'
- new_path='./audios/'+new_name
- shutil.move(path_to_file,new_path)
- return new_name
-
-
-def change_choices2():
- audio_paths=[]
- for filename in os.listdir("./audios"):
- if filename.endswith(('wav', 'mp3', 'flac', 'ogg', 'opus',
- 'm4a', 'mp4', 'aac', 'alac', 'wma',
- 'aiff', 'webm', 'ac3')):
- audio_paths.append(os.path.join('./audios',filename).replace('\\', '/'))
- return {"choices": sorted(audio_paths), "__type__": "update"}, {"__type__": "update"}
-
-
-
-
-
-def uvr(input_url, output_path, model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0, architecture):
- carpeta_a_eliminar = "yt_downloads"
- if os.path.exists(carpeta_a_eliminar) and os.path.isdir(carpeta_a_eliminar):
- for archivo in os.listdir(carpeta_a_eliminar):
- ruta_archivo = os.path.join(carpeta_a_eliminar, archivo)
- if os.path.isfile(ruta_archivo):
- os.remove(ruta_archivo)
- elif os.path.isdir(ruta_archivo):
- shutil.rmtree(ruta_archivo)
-
-
-
- ydl_opts = {
- 'no-windows-filenames': True,
- 'restrict-filenames': True,
- 'extract_audio': True,
- 'format': 'bestaudio',
- 'quiet': True,
- 'no-warnings': True,
- }
-
- try:
- print(i18n("Downloading audio from the video..."))
- with yt_dlp.YoutubeDL(ydl_opts) as ydl:
- info_dict = ydl.extract_info(input_url, download=False)
- formatted_title = format_title(info_dict.get('title', 'default_title'))
- formatted_outtmpl = output_path + '/' + formatted_title + '.wav'
- ydl_opts['outtmpl'] = formatted_outtmpl
- ydl = yt_dlp.YoutubeDL(ydl_opts)
- ydl.download([input_url])
- print(i18n("Audio downloaded!"))
- except Exception as error:
- print(i18n("An error occurred:"), error)
-
- actual_directory = os.path.dirname(__file__)
-
- vocal_directory = os.path.join(actual_directory, save_root_vocal)
- instrumental_directory = os.path.join(actual_directory, save_root_ins)
-
- vocal_formatted = f"vocal_{formatted_title}.wav.reformatted.wav_10.wav"
- instrumental_formatted = f"instrument_{formatted_title}.wav.reformatted.wav_10.wav"
-
- vocal_audio_path = os.path.join(vocal_directory, vocal_formatted)
- instrumental_audio_path = os.path.join(instrumental_directory, instrumental_formatted)
-
- vocal_formatted_mdx = f"{formatted_title}_vocal_.wav"
- instrumental_formatted_mdx = f"{formatted_title}_instrument_.wav"
-
- vocal_audio_path_mdx = os.path.join(vocal_directory, vocal_formatted_mdx)
- instrumental_audio_path_mdx = os.path.join(instrumental_directory, instrumental_formatted_mdx)
-
- if architecture == "VR":
- try:
- print(i18n("Starting audio conversion... (This might take a moment)"))
- inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [inp_root, save_root_vocal, save_root_ins]]
- usable_files = [os.path.join(inp_root, file)
- for file in os.listdir(inp_root)
- if file.endswith(tuple(sup_audioext))]
-
-
- pre_fun = MDXNetDereverb(15) if model_name == "onnx_dereverb_By_FoxJoy" else (_audio_pre_ if "DeEcho" not in model_name else _audio_pre_new)(
- agg=int(agg),
- model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
- device=config.device,
- is_half=config.is_half,
- )
-
- try:
- if paths != None:
- paths = [path.name for path in paths]
- else:
- paths = usable_files
-
- except:
- traceback.print_exc()
- paths = usable_files
- print(paths)
- for path in paths:
- inp_path = os.path.join(inp_root, path)
- need_reformat, done = 1, 0
-
- try:
- info = ffmpeg.probe(inp_path, cmd="ffprobe")
- if info["streams"][0]["channels"] == 2 and info["streams"][0]["sample_rate"] == "44100":
- need_reformat = 0
- pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0)
- done = 1
- except:
- traceback.print_exc()
-
- if need_reformat:
- tmp_path = f"{tmp}/{os.path.basename(inp_path)}.reformatted.wav"
- os.system(f"ffmpeg -i {inp_path} -vn -acodec pcm_s16le -ac 2 -ar 44100 {tmp_path} -y")
- inp_path = tmp_path
-
- try:
- if not done:
- pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0)
- print(f"{os.path.basename(inp_path)}->Success")
- except:
- print(f"{os.path.basename(inp_path)}->{traceback.format_exc()}")
- except:
- traceback.print_exc()
- finally:
- try:
- if model_name == "onnx_dereverb_By_FoxJoy":
- del pre_fun.pred.model
- del pre_fun.pred.model_
- else:
- del pre_fun.model
-
- del pre_fun
- return i18n("Finished"), vocal_audio_path, instrumental_audio_path
- except: traceback.print_exc()
-
- if torch.cuda.is_available(): torch.cuda.empty_cache()
-
- elif architecture == "MDX":
- try:
- print(i18n("Starting audio conversion... (This might take a moment)"))
- inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [inp_root, save_root_vocal, save_root_ins]]
-
- usable_files = [os.path.join(inp_root, file)
- for file in os.listdir(inp_root)
- if file.endswith(tuple(sup_audioext))]
- try:
- if paths != None:
- paths = [path.name for path in paths]
- else:
- paths = usable_files
-
- except:
- traceback.print_exc()
- paths = usable_files
- print(paths)
- invert=True
- denoise=True
- use_custom_parameter=True
- dim_f=2048
- dim_t=256
- n_fft=7680
- use_custom_compensation=True
- compensation=1.025
- suffix = "vocal_" #@param ["Vocals", "Drums", "Bass", "Other"]{allow-input: true}
- suffix_invert = "instrument_" #@param ["Instrumental", "Drumless", "Bassless", "Instruments"]{allow-input: true}
- print_settings = True # @param{type:"boolean"}
- onnx = id_to_ptm(model_name)
- compensation = compensation if use_custom_compensation or use_custom_parameter else None
- mdx_model = prepare_mdx(onnx,use_custom_parameter, dim_f, dim_t, n_fft, compensation=compensation)
-
-
- for path in paths:
- #inp_path = os.path.join(inp_root, path)
- suffix_naming = suffix if use_custom_parameter else None
- diff_suffix_naming = suffix_invert if use_custom_parameter else None
- run_mdx(onnx, mdx_model, path, format0, diff=invert,suffix=suffix_naming,diff_suffix=diff_suffix_naming,denoise=denoise)
-
- if print_settings:
- print()
- print('[MDX-Net_Colab settings used]')
- print(f'Model used: {onnx}')
- print(f'Model MD5: {mdx.MDX.get_hash(onnx)}')
- print(f'Model parameters:')
- print(f' -dim_f: {mdx_model.dim_f}')
- print(f' -dim_t: {mdx_model.dim_t}')
- print(f' -n_fft: {mdx_model.n_fft}')
- print(f' -compensation: {mdx_model.compensation}')
- print()
- print('[Input file]')
- print('filename(s): ')
- for filename in paths:
- print(f' -{filename}')
- print(f"{os.path.basename(filename)}->Success")
- except:
- traceback.print_exc()
- finally:
- try:
- del mdx_model
- return i18n("Finished"), vocal_audio_path_mdx, instrumental_audio_path_mdx
- except: traceback.print_exc()
-
- print("clean_empty_cache")
-
- if torch.cuda.is_available(): torch.cuda.empty_cache()
-sup_audioext = {'wav', 'mp3', 'flac', 'ogg', 'opus',
- 'm4a', 'mp4', 'aac', 'alac', 'wma',
- 'aiff', 'webm', 'ac3'}
-
-def load_downloaded_audio(url):
- parent_path = find_folder_parent(".", "pretrained_v2")
- try:
- infos = []
- audios_path = os.path.join(parent_path, 'audios')
- zips_path = os.path.join(parent_path, 'zips')
-
- if not os.path.exists(audios_path):
- os.mkdir(audios_path)
-
- download_file = download_from_url(url)
- if not download_file:
- print(i18n("The file could not be downloaded."))
- infos.append(i18n("The file could not be downloaded."))
- yield "\n".join(infos)
- elif download_file == "downloaded":
- print(i18n("It has been downloaded successfully."))
- infos.append(i18n("It has been downloaded successfully."))
- yield "\n".join(infos)
- elif download_file == "too much use":
- raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
- elif download_file == "private link":
- raise Exception(i18n("Cannot get file from this private link"))
-
- for filename in os.listdir(zips_path):
- item_path = os.path.join(zips_path, filename)
- if item_path.split('.')[-1] in sup_audioext:
- if os.path.exists(item_path):
- shutil.move(item_path, audios_path)
-
- result = ""
- print(i18n("Audio files have been moved to the 'audios' folder."))
- infos.append(i18n("Audio files have been moved to the 'audios' folder."))
- yield "\n".join(infos)
-
- os.chdir(parent_path)
- return result
- except Exception as e:
- os.chdir(parent_path)
- if "too much use" in str(e):
- print(i18n("Too many users have recently viewed or downloaded this file"))
- yield i18n("Too many users have recently viewed or downloaded this file")
- elif "private link" in str(e):
- print(i18n("Cannot get file from this private link"))
- yield i18n("Cannot get file from this private link")
- else:
- print(e)
- yield i18n("An error occurred downloading")
- finally:
- os.chdir(parent_path)
-
-
-def update_model_choices(select_value):
- model_ids = get_model_list()
- model_ids_list = list(model_ids)
- if select_value == "VR":
- return {"choices": uvr5_names, "__type__": "update"}
- elif select_value == "MDX":
- return {"choices": model_ids_list, "__type__": "update"}
-
-def download_model():
- gr.Markdown(value="# " + i18n("Download Model"))
- gr.Markdown(value=i18n("It is used to download your inference models."))
- with gr.Row():
- model_url=gr.Textbox(label=i18n("Url:"))
- with gr.Row():
- download_model_status_bar=gr.Textbox(label=i18n("Status:"))
- with gr.Row():
- download_button=gr.Button(i18n("Download"))
- download_button.click(fn=load_downloaded_model, inputs=[model_url], outputs=[download_model_status_bar])
-
-def download_backup():
- gr.Markdown(value="# " + i18n("Download Backup"))
- gr.Markdown(value=i18n("It is used to download your training backups."))
- with gr.Row():
- model_url=gr.Textbox(label=i18n("Url:"))
- with gr.Row():
- download_model_status_bar=gr.Textbox(label=i18n("Status:"))
- with gr.Row():
- download_button=gr.Button(i18n("Download"))
- download_button.click(fn=load_downloaded_backup, inputs=[model_url], outputs=[download_model_status_bar])
-
-def update_dataset_list(name):
- new_datasets = []
- for foldername in os.listdir("./datasets"):
- if "." not in foldername:
- new_datasets.append(os.path.join(find_folder_parent(".","pretrained"),"datasets",foldername))
- return gr.Dropdown.update(choices=new_datasets)
-
-def download_dataset(trainset_dir4):
- gr.Markdown(value="# " + i18n("Download Dataset"))
- gr.Markdown(value=i18n("Download the dataset with the audios in a compatible format (.wav/.flac) to train your model."))
- with gr.Row():
- dataset_url=gr.Textbox(label=i18n("Url:"))
- with gr.Row():
- load_dataset_status_bar=gr.Textbox(label=i18n("Status:"))
- with gr.Row():
- load_dataset_button=gr.Button(i18n("Download"))
- load_dataset_button.click(fn=load_dowloaded_dataset, inputs=[dataset_url], outputs=[load_dataset_status_bar])
- load_dataset_status_bar.change(update_dataset_list, dataset_url, trainset_dir4)
-
-def download_audio():
- gr.Markdown(value="# " + i18n("Download Audio"))
- gr.Markdown(value=i18n("Download audios of any format for use in inference (recommended for mobile users)."))
- with gr.Row():
- audio_url=gr.Textbox(label=i18n("Url:"))
- with gr.Row():
- download_audio_status_bar=gr.Textbox(label=i18n("Status:"))
- with gr.Row():
- download_button2=gr.Button(i18n("Download"))
- download_button2.click(fn=load_downloaded_audio, inputs=[audio_url], outputs=[download_audio_status_bar])
-
-def youtube_separator():
- gr.Markdown(value="# " + i18n("Separate YouTube tracks"))
- gr.Markdown(value=i18n("Download audio from a YouTube video and automatically separate the vocal and instrumental tracks"))
- with gr.Row():
- input_url = gr.inputs.Textbox(label=i18n("Enter the YouTube link:"))
- output_path = gr.Textbox(
- label=i18n("Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):"),
- value=os.path.abspath(os.getcwd()).replace('\\', '/') + "/yt_downloads",
- visible=False,
- )
- advanced_settings_checkbox = gr.Checkbox(
- value=False,
- label=i18n("Advanced Settings"),
- interactive=True,
- )
- with gr.Row(label = i18n("Advanced Settings"), visible=False, variant='compact') as advanced_settings:
- with gr.Column():
- model_select = gr.Radio(
- label=i18n("Model Architecture:"),
- choices=["VR", "MDX"],
- value="VR",
- interactive=True,
- )
- model_choose = gr.Dropdown(label=i18n("Model: (Be aware that in some models the named vocal will be the instrumental)"),
- choices=uvr5_names,
- value="HP5_only_main_vocal"
- )
- with gr.Row():
- agg = gr.Slider(
- minimum=0,
- maximum=20,
- step=1,
- label=i18n("Vocal Extraction Aggressive"),
- value=10,
- interactive=True,
- )
- with gr.Row():
- opt_vocal_root = gr.Textbox(
- label=i18n("Specify the output folder for vocals:"), value="audios",
- )
- opt_ins_root = gr.Textbox(
- label=i18n("Specify the output folder for accompaniment:"), value="audio-others",
- )
- dir_wav_input = gr.Textbox(
- label=i18n("Enter the path of the audio folder to be processed:"),
- value=((os.getcwd()).replace('\\', '/') + "/yt_downloads"),
- visible=False,
- )
- format0 = gr.Radio(
- label=i18n("Export file format"),
- choices=["wav", "flac", "mp3", "m4a"],
- value="wav",
- visible=False,
- interactive=True,
- )
- wav_inputs = gr.File(
- file_count="multiple", label=i18n("You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder."),
- visible=False,
- )
- model_select.change(
- fn=update_model_choices,
- inputs=model_select,
- outputs=model_choose,
- )
- with gr.Row():
- vc_output4 = gr.Textbox(label=i18n("Status:"))
- vc_output5 = gr.Audio(label=i18n("Vocal"), type='filepath')
- vc_output6 = gr.Audio(label=i18n("Instrumental"), type='filepath')
- with gr.Row():
- but2 = gr.Button(i18n("Download and Separate"))
- but2.click(
- uvr,
- [
- input_url,
- output_path,
- model_choose,
- dir_wav_input,
- opt_vocal_root,
- wav_inputs,
- opt_ins_root,
- agg,
- format0,
- model_select
- ],
- [vc_output4, vc_output5, vc_output6],
- )
- def toggle_advanced_settings(checkbox):
- return {"visible": checkbox, "__type__": "update"}
-
- advanced_settings_checkbox.change(
- fn=toggle_advanced_settings,
- inputs=[advanced_settings_checkbox],
- outputs=[advanced_settings]
- )
-
-
-def get_bark_voice():
- mensaje = """
-v2/en_speaker_0 English Male
-v2/en_speaker_1 English Male
-v2/en_speaker_2 English Male
-v2/en_speaker_3 English Male
-v2/en_speaker_4 English Male
-v2/en_speaker_5 English Male
-v2/en_speaker_6 English Male
-v2/en_speaker_7 English Male
-v2/en_speaker_8 English Male
-v2/en_speaker_9 English Female
-v2/zh_speaker_0 Chinese (Simplified) Male
-v2/zh_speaker_1 Chinese (Simplified) Male
-v2/zh_speaker_2 Chinese (Simplified) Male
-v2/zh_speaker_3 Chinese (Simplified) Male
-v2/zh_speaker_4 Chinese (Simplified) Female
-v2/zh_speaker_5 Chinese (Simplified) Male
-v2/zh_speaker_6 Chinese (Simplified) Female
-v2/zh_speaker_7 Chinese (Simplified) Female
-v2/zh_speaker_8 Chinese (Simplified) Male
-v2/zh_speaker_9 Chinese (Simplified) Female
-v2/fr_speaker_0 French Male
-v2/fr_speaker_1 French Female
-v2/fr_speaker_2 French Female
-v2/fr_speaker_3 French Male
-v2/fr_speaker_4 French Male
-v2/fr_speaker_5 French Female
-v2/fr_speaker_6 French Male
-v2/fr_speaker_7 French Male
-v2/fr_speaker_8 French Male
-v2/fr_speaker_9 French Male
-v2/de_speaker_0 German Male
-v2/de_speaker_1 German Male
-v2/de_speaker_2 German Male
-v2/de_speaker_3 German Female
-v2/de_speaker_4 German Male
-v2/de_speaker_5 German Male
-v2/de_speaker_6 German Male
-v2/de_speaker_7 German Male
-v2/de_speaker_8 German Female
-v2/de_speaker_9 German Male
-v2/hi_speaker_0 Hindi Female
-v2/hi_speaker_1 Hindi Female
-v2/hi_speaker_2 Hindi Male
-v2/hi_speaker_3 Hindi Female
-v2/hi_speaker_4 Hindi Female
-v2/hi_speaker_5 Hindi Male
-v2/hi_speaker_6 Hindi Male
-v2/hi_speaker_7 Hindi Male
-v2/hi_speaker_8 Hindi Male
-v2/hi_speaker_9 Hindi Female
-v2/it_speaker_0 Italian Male
-v2/it_speaker_1 Italian Male
-v2/it_speaker_2 Italian Female
-v2/it_speaker_3 Italian Male
-v2/it_speaker_4 Italian Male
-v2/it_speaker_5 Italian Male
-v2/it_speaker_6 Italian Male
-v2/it_speaker_7 Italian Female
-v2/it_speaker_8 Italian Male
-v2/it_speaker_9 Italian Female
-v2/ja_speaker_0 Japanese Female
-v2/ja_speaker_1 Japanese Female
-v2/ja_speaker_2 Japanese Male
-v2/ja_speaker_3 Japanese Female
-v2/ja_speaker_4 Japanese Female
-v2/ja_speaker_5 Japanese Female
-v2/ja_speaker_6 Japanese Male
-v2/ja_speaker_7 Japanese Female
-v2/ja_speaker_8 Japanese Female
-v2/ja_speaker_9 Japanese Female
-v2/ko_speaker_0 Korean Female
-v2/ko_speaker_1 Korean Male
-v2/ko_speaker_2 Korean Male
-v2/ko_speaker_3 Korean Male
-v2/ko_speaker_4 Korean Male
-v2/ko_speaker_5 Korean Male
-v2/ko_speaker_6 Korean Male
-v2/ko_speaker_7 Korean Male
-v2/ko_speaker_8 Korean Male
-v2/ko_speaker_9 Korean Male
-v2/pl_speaker_0 Polish Male
-v2/pl_speaker_1 Polish Male
-v2/pl_speaker_2 Polish Male
-v2/pl_speaker_3 Polish Male
-v2/pl_speaker_4 Polish Female
-v2/pl_speaker_5 Polish Male
-v2/pl_speaker_6 Polish Female
-v2/pl_speaker_7 Polish Male
-v2/pl_speaker_8 Polish Male
-v2/pl_speaker_9 Polish Female
-v2/pt_speaker_0 Portuguese Male
-v2/pt_speaker_1 Portuguese Male
-v2/pt_speaker_2 Portuguese Male
-v2/pt_speaker_3 Portuguese Male
-v2/pt_speaker_4 Portuguese Male
-v2/pt_speaker_5 Portuguese Male
-v2/pt_speaker_6 Portuguese Male
-v2/pt_speaker_7 Portuguese Male
-v2/pt_speaker_8 Portuguese Male
-v2/pt_speaker_9 Portuguese Male
-v2/ru_speaker_0 Russian Male
-v2/ru_speaker_1 Russian Male
-v2/ru_speaker_2 Russian Male
-v2/ru_speaker_3 Russian Male
-v2/ru_speaker_4 Russian Male
-v2/ru_speaker_5 Russian Female
-v2/ru_speaker_6 Russian Female
-v2/ru_speaker_7 Russian Male
-v2/ru_speaker_8 Russian Male
-v2/ru_speaker_9 Russian Female
-v2/es_speaker_0 Spanish Male
-v2/es_speaker_1 Spanish Male
-v2/es_speaker_2 Spanish Male
-v2/es_speaker_3 Spanish Male
-v2/es_speaker_4 Spanish Male
-v2/es_speaker_5 Spanish Male
-v2/es_speaker_6 Spanish Male
-v2/es_speaker_7 Spanish Male
-v2/es_speaker_8 Spanish Female
-v2/es_speaker_9 Spanish Female
-v2/tr_speaker_0 Turkish Male
-v2/tr_speaker_1 Turkish Male
-v2/tr_speaker_2 Turkish Male
-v2/tr_speaker_3 Turkish Male
-v2/tr_speaker_4 Turkish Female
-v2/tr_speaker_5 Turkish Female
-v2/tr_speaker_6 Turkish Male
-v2/tr_speaker_7 Turkish Male
-v2/tr_speaker_8 Turkish Male
-v2/tr_speaker_9 Turkish Male
- """
-# Split the message into lines
- lineas = mensaje.split("\n")
- datos_deseados = []
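- # Each tab-separated row like "v2/en_speaker_0\tEnglish\tMale" is reduced to "v2/en_speaker_0-Male"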
- for linea in lineas:
- partes = linea.split("\t")
- if len(partes) == 3:
- clave, _, genero = partes
- datos_deseados.append(f"{clave}-{genero}")
-
- return datos_deseados
-
-
-def get_edge_voice():
- completed_process = subprocess.run(['edge-tts',"-l"], capture_output=True, text=True)
- lines = completed_process.stdout.strip().split("\n")
- data = []
- current_entry = {}
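- # `edge-tts -l` lists voices as blocks of "Name: ..." / "Gender: ..." lines;
- # fold each block into a dict before formatting it as "Name-Gender" below.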
- for line in lines:
- if line.startswith("Name: "):
- if current_entry:
- data.append(current_entry)
- current_entry = {"Name": line.split(": ")[1]}
- elif line.startswith("Gender: "):
- current_entry["Gender"] = line.split(": ")[1]
- if current_entry:
- data.append(current_entry)
- tts_voice = []
- for entry in data:
- name = entry["Name"]
- gender = entry["Gender"]
- formatted_entry = f'{name}-{gender}'
- tts_voice.append(formatted_entry)
- return tts_voice
-
-
-#print(set_tts_voice)
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/infer_uvr5.py b/spaces/Kangarroar/ApplioRVC-Inference/infer_uvr5.py
deleted file mode 100644
index 8c8c05429a1d65dd8b198f16a8ea8c6e68991c07..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/infer_uvr5.py
+++ /dev/null
@@ -1,363 +0,0 @@
-import os, sys, torch, warnings, pdb
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-from json import load as ll
-
-warnings.filterwarnings("ignore")
-import librosa
-import importlib
-import numpy as np
-import hashlib, math
-from tqdm import tqdm
-from lib.uvr5_pack.lib_v5 import spec_utils
-from lib.uvr5_pack.utils import _get_name_params, inference
-from lib.uvr5_pack.lib_v5.model_param_init import ModelParameters
-import soundfile as sf
-from lib.uvr5_pack.lib_v5.nets_new import CascadedNet
-from lib.uvr5_pack.lib_v5 import nets_61968KB as nets
-
-
-class _audio_pre_:
- def __init__(self, agg, model_path, device, is_half):
- self.model_path = model_path
- self.device = device
- self.data = {
- # Processing Options
- "postprocess": False,
- "tta": False,
- # Constants
- "window_size": 512,
- "agg": agg,
- "high_end_process": "mirroring",
- }
- mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v2.json")
- model = nets.CascadedASPPNet(mp.param["bins"] * 2)
- cpk = torch.load(model_path, map_location="cpu")
- model.load_state_dict(cpk)
- model.eval()
- if is_half:
- model = model.half().to(device)
- else:
- model = model.to(device)
-
- self.mp = mp
- self.model = model
-
- def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"):
- if ins_root is None and vocal_root is None:
- return "No save root."
- name = os.path.basename(music_file)
- if ins_root is not None:
- os.makedirs(ins_root, exist_ok=True)
- if vocal_root is not None:
- os.makedirs(vocal_root, exist_ok=True)
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
- bands_n = len(self.mp.param["band"])
- # print(bands_n)
- for d in range(bands_n, 0, -1):
- bp = self.mp.param["band"][d]
- if d == bands_n: # high-end band
- (
- X_wave[d],
- _,
- ) = librosa.core.load(
- music_file,
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
- if X_wave[d].ndim == 1:
- X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
- else: # lower bands
- X_wave[d] = librosa.core.resample(
- X_wave[d + 1],
- self.mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
- # Stft of wave source
- X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
- X_wave[d],
- bp["hl"],
- bp["n_fft"],
- self.mp.param["mid_side"],
- self.mp.param["mid_side_b2"],
- self.mp.param["reverse"],
- )
- # pdb.set_trace()
- if d == bands_n and self.data["high_end_process"] != "none":
- input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
- self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
- )
- input_high_end = X_spec_s[d][
- :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
- ]
-
- X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
- aggressive_set = float(self.data["agg"] / 100)
- aggressiveness = {
- "value": aggressive_set,
- "split_bin": self.mp.param["band"][1]["crop_stop"],
- }
- with torch.no_grad():
- pred, X_mag, X_phase = inference(
- X_spec_m, self.device, self.model, aggressiveness, self.data
- )
- # Postprocess
- if self.data["postprocess"]:
- pred_inv = np.clip(X_mag - pred, 0, np.inf)
- pred = spec_utils.mask_silence(pred, pred_inv)
- y_spec_m = pred * X_phase
- v_spec_m = X_spec_m - y_spec_m
-
- if ins_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], y_spec_m, input_high_end, self.mp
- )
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(
- y_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
- print("%s instruments done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- ins_root,
- "instrument_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- ) #
- else:
- path = os.path.join(
- ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
- if vocal_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], v_spec_m, input_high_end, self.mp
- )
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(
- v_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
- print("%s vocals done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- vocal_root,
- "vocal_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- else:
- path = os.path.join(
- vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
-
-
-class _audio_pre_new:
- def __init__(self, agg, model_path, device, is_half):
- self.model_path = model_path
- self.device = device
- self.data = {
- # Processing Options
- "postprocess": False,
- "tta": False,
- # Constants
- "window_size": 512,
- "agg": agg,
- "high_end_process": "mirroring",
- }
- mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v3.json")
- nout = 64 if "DeReverb" in model_path else 48
- model = CascadedNet(mp.param["bins"] * 2, nout)
- cpk = torch.load(model_path, map_location="cpu")
- model.load_state_dict(cpk)
- model.eval()
- if is_half:
- model = model.half().to(device)
- else:
- model = model.to(device)
-
- self.mp = mp
- self.model = model
-
- def _path_audio_(
- self, music_file, vocal_root=None, ins_root=None, format="flac"
- ): # for these 3 VR models, the vocal and instrumental outputs are swapped
- if ins_root is None and vocal_root is None:
- return "No save root."
- name = os.path.basename(music_file)
- if ins_root is not None:
- os.makedirs(ins_root, exist_ok=True)
- if vocal_root is not None:
- os.makedirs(vocal_root, exist_ok=True)
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
- bands_n = len(self.mp.param["band"])
- # print(bands_n)
- for d in range(bands_n, 0, -1):
- bp = self.mp.param["band"][d]
- if d == bands_n: # high-end band
- (
- X_wave[d],
- _,
- ) = librosa.core.load(
- music_file,
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
- if X_wave[d].ndim == 1:
- X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
- else: # lower bands
- X_wave[d] = librosa.core.resample(
- X_wave[d + 1],
- self.mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
- # Stft of wave source
- X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
- X_wave[d],
- bp["hl"],
- bp["n_fft"],
- self.mp.param["mid_side"],
- self.mp.param["mid_side_b2"],
- self.mp.param["reverse"],
- )
- # pdb.set_trace()
- if d == bands_n and self.data["high_end_process"] != "none":
- input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
- self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
- )
- input_high_end = X_spec_s[d][
- :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
- ]
-
- X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
- aggressive_set = float(self.data["agg"] / 100)
- aggressiveness = {
- "value": aggressive_set,
- "split_bin": self.mp.param["band"][1]["crop_stop"],
- }
- with torch.no_grad():
- pred, X_mag, X_phase = inference(
- X_spec_m, self.device, self.model, aggressiveness, self.data
- )
- # Postprocess
- if self.data["postprocess"]:
- pred_inv = np.clip(X_mag - pred, 0, np.inf)
- pred = spec_utils.mask_silence(pred, pred_inv)
- y_spec_m = pred * X_phase
- v_spec_m = X_spec_m - y_spec_m
-
- if ins_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], y_spec_m, input_high_end, self.mp
- )
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(
- y_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
- print("%s instruments done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- ins_root,
- "instrument_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- ) #
- else:
- path = os.path.join(
- ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
- if vocal_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], v_spec_m, input_high_end, self.mp
- )
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(
- v_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
- print("%s vocals done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- vocal_root,
- "vocal_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- else:
- path = os.path.join(
- vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
-
-
-if __name__ == "__main__":
- device = "cuda"
- is_half = True
- # model_path = "uvr5_weights/2_HP-UVR.pth"
- # model_path = "uvr5_weights/VR-DeEchoDeReverb.pth"
- # model_path = "uvr5_weights/VR-DeEchoNormal.pth"
- model_path = "uvr5_weights/DeEchoNormal.pth"
- # pre_fun = _audio_pre_(model_path=model_path, device=device, is_half=True,agg=10)
- pre_fun = _audio_pre_new(model_path=model_path, device=device, is_half=True, agg=10)
- audio_path = "雪雪伴奏对消HP5.wav"
- save_path = "opt"
- pre_fun._path_audio_(audio_path, save_path, save_path)
diff --git a/spaces/KenjieDec/RemBG/rembg/sessions/u2net_human_seg.py b/spaces/KenjieDec/RemBG/rembg/sessions/u2net_human_seg.py
deleted file mode 100644
index 166c195302c2530b63e79b4884ffb7681388c902..0000000000000000000000000000000000000000
--- a/spaces/KenjieDec/RemBG/rembg/sessions/u2net_human_seg.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import os
-from typing import List
-
-import numpy as np
-import pooch
-from PIL import Image
-from PIL.Image import Image as PILImage
-
-from .base import BaseSession
-
-
-class U2netHumanSegSession(BaseSession):
- def predict(self, img: PILImage, *args, **kwargs) -> List[PILImage]:
- ort_outs = self.inner_session.run(
- None,
- self.normalize(
- img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), (320, 320)
- ),
- )
-
- pred = ort_outs[0][:, 0, :, :]
-
- ma = np.max(pred)
- mi = np.min(pred)
-
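- # min-max normalize the predicted mask to [0, 1] before converting it to an 8-bit image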
- pred = (pred - mi) / (ma - mi)
- pred = np.squeeze(pred)
-
- mask = Image.fromarray((pred * 255).astype("uint8"), mode="L")
- mask = mask.resize(img.size, Image.LANCZOS)
-
- return [mask]
-
- @classmethod
- def download_models(cls, *args, **kwargs):
- fname = f"{cls.name()}.onnx"
- pooch.retrieve(
- "https://github.com/danielgatis/rembg/releases/download/v0.0.0/u2net_human_seg.onnx",
- None
- if cls.checksum_disabled(*args, **kwargs)
- else "md5:c09ddc2e0104f800e3e1bb4652583d1f",
- fname=fname,
- path=cls.u2net_home(*args, **kwargs),
- progressbar=True,
- )
-
- return os.path.join(cls.u2net_home(), fname)
-
- @classmethod
- def name(cls, *args, **kwargs):
- return "u2net_human_seg"
diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/uvr5_pack/lib_v5/model_param_init.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/uvr5_pack/lib_v5/model_param_init.py
deleted file mode 100644
index 5d818dbee4d4490b2884b3346c20c9370c0810fc..0000000000000000000000000000000000000000
--- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/uvr5_pack/lib_v5/model_param_init.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import json
-import pathlib
-
-default_param = {}
-default_param["bins"] = 768
-default_param["unstable_bins"] = 9 # training only
-default_param["reduction_bins"] = 762 # training only
-default_param["sr"] = 44100
-default_param["pre_filter_start"] = 757
-default_param["pre_filter_stop"] = 768
-default_param["band"] = {}
-
-
-default_param["band"][1] = {
- "sr": 11025,
- "hl": 128,
- "n_fft": 960,
- "crop_start": 0,
- "crop_stop": 245,
- "lpf_start": 61, # inference only
- "res_type": "polyphase",
-}
-
-default_param["band"][2] = {
- "sr": 44100,
- "hl": 512,
- "n_fft": 1536,
- "crop_start": 24,
- "crop_stop": 547,
- "hpf_start": 81, # inference only
- "res_type": "sinc_best",
-}
-
-
-def int_keys(d):
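- # json object_pairs_hook: receives a list of (key, value) pairs and
- # converts digit string keys such as "1" into int keys.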
- r = {}
- for k, v in d:
- if k.isdigit():
- k = int(k)
- r[k] = v
- return r
-
-
-class ModelParameters(object):
- def __init__(self, config_path=""):
- if ".pth" == pathlib.Path(config_path).suffix:
- import zipfile
-
- with zipfile.ZipFile(config_path, "r") as zip:
- self.param = json.loads(
- zip.read("param.json"), object_pairs_hook=int_keys
- )
- elif ".json" == pathlib.Path(config_path).suffix:
- with open(config_path, "r") as f:
- self.param = json.loads(f.read(), object_pairs_hook=int_keys)
- else:
- self.param = default_param
-
- for k in [
- "mid_side",
- "mid_side_b",
- "mid_side_b2",
- "stereo_w",
- "stereo_n",
- "reverse",
- ]:
- if k not in self.param:
- self.param[k] = False
diff --git a/spaces/Lbin123/Lbingo/src/components/ui/tooltip.tsx b/spaces/Lbin123/Lbingo/src/components/ui/tooltip.tsx
deleted file mode 100644
index af1d48beb90dd5ae311796539843700871052cae..0000000000000000000000000000000000000000
--- a/spaces/Lbin123/Lbingo/src/components/ui/tooltip.tsx
+++ /dev/null
@@ -1,30 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as TooltipPrimitive from '@radix-ui/react-tooltip'
-
-import { cn } from '@/lib/utils'
-
-const TooltipProvider = TooltipPrimitive.Provider
-
-const Tooltip = TooltipPrimitive.Root
-
-const TooltipTrigger = TooltipPrimitive.Trigger
-
-const TooltipContent = React.forwardRef<
- React.ElementRef<typeof TooltipPrimitive.Content>,
- React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
->(({ className, sideOffset = 4, ...props }, ref) => (
- <TooltipPrimitive.Content
- ref={ref}
- sideOffset={sideOffset}
- className={cn(
- 'z-50 overflow-hidden rounded-md border bg-popover px-3 py-1.5 text-sm text-popover-foreground shadow-md animate-in fade-in-50 data-[side=bottom]:slide-in-from-top-1 data-[side=left]:slide-in-from-right-1 data-[side=right]:slide-in-from-left-1 data-[side=top]:slide-in-from-bottom-1',
- className
- )}
- {...props}
- />
-))
-TooltipContent.displayName = TooltipPrimitive.Content.displayName
-
-export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
diff --git a/spaces/LightChen2333/OpenSLU/model/encoder/non_pretrained_encoder.py b/spaces/LightChen2333/OpenSLU/model/encoder/non_pretrained_encoder.py
deleted file mode 100644
index f842b4bd2ca8c5e6eb9001a03e5f46ec98650e37..0000000000000000000000000000000000000000
--- a/spaces/LightChen2333/OpenSLU/model/encoder/non_pretrained_encoder.py
+++ /dev/null
@@ -1,212 +0,0 @@
-'''
-Author: Qiguang Chen
-Date: 2023-01-11 10:39:26
-LastEditors: Qiguang Chen
-LastEditTime: 2023-02-17 21:08:19
-Description: non-pretrained encoder model
-
-'''
-import math
-
-import einops
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
-
-from common.utils import HiddenData, InputData
-from model.encoder.base_encoder import BaseEncoder
-
-class NonPretrainedEncoder(BaseEncoder):
- """
- Encoder structure based on bidirectional LSTM and self-attention.
- """
-
- def __init__(self, **config):
- """ init non-pretrained encoder
-
- Args:
- config (dict):
- embedding (dict):
- dropout_rate (float): dropout rate.
- load_embedding_name (str): None if not use pretrained embedding or embedding name like "glove.6B.300d.txt".
- embedding_matrix (Tensor, Optional): embedding matrix tensor. Enabled if load_embedding_name is not None.
- vocab_size (int, Optional): vocabulary size. Enabled if load_embedding_name is None.
- lstm (dict):
- output_dim (int): lstm output dim.
- bidirectional (bool): if use BiLSTM or LSTM.
- layer_num (int): number of layers.
- dropout_rate (float): dropout rate.
- attention (dict, Optional):
- dropout_rate (float): dropout rate.
- hidden_dim (int): attention hidden dim.
- output_dim (int): attention output dim.
- unflat_attention (dict, optional): Enabled if attention is not None.
- dropout_rate (float): dropout rate.
- """
- super(NonPretrainedEncoder, self).__init__()
- self.config = config
- # Embedding Initialization
- embed_config = config["embedding"]
- self.__embedding_dim = embed_config["embedding_dim"]
- if embed_config.get("load_embedding_name") and embed_config.get("embedding_matrix"):
- self.__embedding_layer = nn.Embedding.from_pretrained(embed_config["embedding_matrix"], padding_idx=0)
- else:
- self.__embedding_layer = nn.Embedding(
- embed_config["vocab_size"], self.__embedding_dim
- )
- self.__embedding_dropout_layer = nn.Dropout(embed_config["dropout_rate"])
-
- # LSTM Initialization
- lstm_config = config["lstm"]
- self.__hidden_size = lstm_config["output_dim"]
- self.__lstm_layer = nn.LSTM(
- input_size=self.__embedding_dim,
- hidden_size=lstm_config["output_dim"] // 2,
- batch_first=True,
- bidirectional=lstm_config["bidirectional"],
- dropout=lstm_config["dropout_rate"],
- num_layers=lstm_config["layer_num"]
- )
- if self.config.get("attention"):
- # Attention Initialization
- att_config = config["attention"]
- self.__attention_dropout_layer = nn.Dropout(att_config["dropout_rate"])
- self.__attention_layer = QKVAttention(
- self.__embedding_dim, self.__embedding_dim, self.__embedding_dim,
- att_config["hidden_dim"], att_config["output_dim"], att_config["dropout_rate"]
- )
- if self.config.get("unflat_attention"):
- unflat_att_config = config["unflat_attention"]
- self.__sentattention = UnflatSelfAttention(
- lstm_config["output_dim"] + att_config["output_dim"],
- unflat_att_config["dropout_rate"]
- )
-
- def forward(self, inputs: InputData):
- """ Forward process for Non-Pretrained Encoder.
-
- Args:
- inputs: padded input ids, masks.
- Returns:
- encoded hidden vectors.
- """
-
- # LSTM Encoder
- # Padded_text should be instance of LongTensor.
- embedded_text = self.__embedding_layer(inputs.input_ids)
- dropout_text = self.__embedding_dropout_layer(embedded_text)
- seq_lens = inputs.attention_mask.sum(-1).detach().cpu()
- # Pack and Pad process for input of variable length.
- packed_text = pack_padded_sequence(dropout_text, seq_lens, batch_first=True, enforce_sorted=False)
- lstm_hiddens, (h_last, c_last) = self.__lstm_layer(packed_text)
- padded_hiddens, _ = pad_packed_sequence(lstm_hiddens, batch_first=True)
-
- if self.config.get("attention"):
- # Attention Encoder
- dropout_text = self.__attention_dropout_layer(embedded_text)
- attention_hiddens = self.__attention_layer(
- dropout_text, dropout_text, dropout_text, mask=inputs.attention_mask
- )
-
- # Attention + LSTM
- hiddens = torch.cat([attention_hiddens, padded_hiddens], dim=-1)
- hidden = HiddenData(None, hiddens)
- if self.config.get("return_with_input"):
- hidden.add_input(inputs)
- if self.config.get("return_sentence_level_hidden"):
- if self.config.get("unflat_attention"):
- sentence = self.__sentattention(hiddens, seq_lens)
- else:
- sentence = hiddens[:, 0, :]
- hidden.update_intent_hidden_state(sentence)
- else:
- sentence_hidden = None
- if self.config.get("return_sentence_level_hidden"):
- sentence_hidden = torch.cat((h_last[-1], h_last[-1], c_last[-1], c_last[-2]), dim=-1)
- hidden = HiddenData(sentence_hidden, padded_hiddens)
- if self.config.get("return_with_input"):
- hidden.add_input(inputs)
-
- return hidden
-
-
-class QKVAttention(nn.Module):
- """
- Attention mechanism based on Query-Key-Value architecture. And
- especially, when query == key == value, it's self-attention.
- """
-
- def __init__(self, query_dim, key_dim, value_dim, hidden_dim, output_dim, dropout_rate):
- super(QKVAttention, self).__init__()
-
- # Record hyper-parameters.
- self.__query_dim = query_dim
- self.__key_dim = key_dim
- self.__value_dim = value_dim
- self.__hidden_dim = hidden_dim
- self.__output_dim = output_dim
- self.__dropout_rate = dropout_rate
-
- # Declare network structures.
- self.__query_layer = nn.Linear(self.__query_dim, self.__hidden_dim)
- self.__key_layer = nn.Linear(self.__key_dim, self.__hidden_dim)
- self.__value_layer = nn.Linear(self.__value_dim, self.__output_dim)
- self.__dropout_layer = nn.Dropout(p=self.__dropout_rate)
-
- def forward(self, input_query, input_key, input_value, mask=None):
- """ The forward propagation of attention.
-
- Here we require the first dimension of input key
- and value are equal.
-
- Args:
- input_query: is query tensor, (n, d_q)
- input_key: is key tensor, (m, d_k)
- input_value: is value tensor, (m, d_v)
-
- Returns:
- attention based tensor, (n, d_h)
- """
-
- # Linear transform to fine-tune dimension.
- linear_query = self.__query_layer(input_query)
- linear_key = self.__key_layer(input_key)
- linear_value = self.__value_layer(input_value)
-
- score_tensor = torch.matmul(
- linear_query,
- linear_key.transpose(-2, -1)
- ) / math.sqrt(self.__hidden_dim)
- if mask is not None:
- attn_mask = einops.repeat((mask == 0), "b l -> b l h", h=score_tensor.shape[-1])
- score_tensor = score_tensor.masked_fill_(attn_mask, -float(1e20))
- score_tensor = F.softmax(score_tensor, dim=-1)
- forced_tensor = torch.matmul(score_tensor, linear_value)
- forced_tensor = self.__dropout_layer(forced_tensor)
-
- return forced_tensor
-
-
-class UnflatSelfAttention(nn.Module):
- """
- scores each element of the sequence with a linear layer and uses the normalized scores to compute a context over the sequence.
- """
-
- def __init__(self, d_hid, dropout=0.):
- super().__init__()
- self.scorer = nn.Linear(d_hid, 1)
- self.dropout = nn.Dropout(dropout)
-
- def forward(self, inp, lens):
- batch_size, seq_len, d_feat = inp.size()
- inp = self.dropout(inp)
- scores = self.scorer(inp.contiguous().view(-1, d_feat)).view(batch_size, seq_len)
- max_len = max(lens)
- for i, l in enumerate(lens):
- if l < max_len:
- scores.data[i, l:] = -np.inf
- scores = F.softmax(scores, dim=1)
- context = scores.unsqueeze(2).expand_as(inp).mul(inp).sum(1)
- return context
\ No newline at end of file
diff --git a/spaces/LightSY/W2L-TD/hparams.py b/spaces/LightSY/W2L-TD/hparams.py
deleted file mode 100644
index 7778a23b060445677118f62458590083d7e0f0cd..0000000000000000000000000000000000000000
--- a/spaces/LightSY/W2L-TD/hparams.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from glob import glob
-import os
-
-def get_image_list(data_root, split):
- filelist = []
-
- with open('filelists/{}.txt'.format(split)) as f:
- for line in f:
- line = line.strip()
- if ' ' in line: line = line.split()[0]
- filelist.append(os.path.join(data_root, line))
-
- return filelist
-
-class HParams:
- def __init__(self, **kwargs):
- self.data = {}
-
- for key, value in kwargs.items():
- self.data[key] = value
-
- def __getattr__(self, key):
- if key not in self.data:
- raise AttributeError("'HParams' object has no attribute %s" % key)
- return self.data[key]
-
- def set_hparam(self, key, value):
- self.data[key] = value
-
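-# Usage sketch: hparams.num_mels reads a value defined below, and
-# hparams.set_hparam("batch_size", 32) overrides it at runtime.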
-
-# Default hyperparameters
-hparams = HParams(
- num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality
- # network
- rescale=True, # Whether to rescale audio prior to preprocessing
- rescaling_max=0.9, # Rescaling value
-
- # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
- # It"s preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
- # Does not work if n_ffit is not multiple of hop_size!!
- use_lws=False,
-
- n_fft=800, # Extra window size is filled with 0 paddings to match this parameter
- hop_size=200, # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate)
- win_size=800, # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)
- sample_rate=16000, # 16000Hz (corresponding to librispeech) (sox --i <filename>)
-
- frame_shift_ms=None, # Can replace hop_size parameter. (Recommended: 12.5)
-
- # Mel and Linear spectrograms normalization/scaling and clipping
- signal_normalization=True,
- # Whether to normalize mel spectrograms to some predefined range (following below parameters)
- allow_clipping_in_normalization=True, # Only relevant if mel_normalization = True
- symmetric_mels=True,
- # Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2,
- # faster and cleaner convergence)
- max_abs_value=4.,
- # max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not
- # be too big to avoid gradient explosion,
- # not too small for fast convergence)
- # Contribution by @begeekmyfriend
- # Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude
- # levels. Also allows for better G&L phase reconstruction)
- preemphasize=True, # whether to apply filter
- preemphasis=0.97, # filter coefficient.
-
- # Limits
- min_level_db=-100,
- ref_level_db=20,
- fmin=55,
- # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To
- # test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
- fmax=7600, # To be increased/reduced depending on data.
-
- ###################### Our training parameters #################################
- img_size=96,
- fps=25,
-
- batch_size=16,
- initial_learning_rate=1e-4,
- nepochs=200000000000000000, ### ctrl + c, stop whenever eval loss is consistently greater than train loss for ~10 epochs
- num_workers=0,
- checkpoint_interval=3000,
- eval_interval=3000,
- save_optimizer_state=True,
-
- syncnet_wt=0.0, # is initially zero, will be set automatically to 0.03 later. Leads to faster convergence.
- syncnet_batch_size=64,
- syncnet_lr=1e-4,
- syncnet_eval_interval=10000,
- syncnet_checkpoint_interval=10000,
-
- disc_wt=0.07,
- disc_initial_learning_rate=1e-4,
-)
-
-
-def hparams_debug_string():
- values = hparams.values()
- hp = [" %s: %s" % (name, values[name]) for name in sorted(values) if name != "sentences"]
- return "Hyperparameters:\n" + "\n".join(hp)
diff --git a/spaces/MJ/AI-ChatBot/imgs/readme.md b/spaces/MJ/AI-ChatBot/imgs/readme.md
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/MLVKU/Human_Object_Interaction/hotr/data/datasets/vcoco.py b/spaces/MLVKU/Human_Object_Interaction/hotr/data/datasets/vcoco.py
deleted file mode 100644
index 895b4b6f64f2b965f8e9557f1af2f97ec302c3e2..0000000000000000000000000000000000000000
--- a/spaces/MLVKU/Human_Object_Interaction/hotr/data/datasets/vcoco.py
+++ /dev/null
@@ -1,467 +0,0 @@
-# Copyright (c) Kakaobrain, Inc. and its affiliates. All Rights Reserved
-"""
-V-COCO dataset which returns image_id for evaluation.
-"""
-from pathlib import Path
-
-from PIL import Image
-import os
-import numpy as np
-import json
-import torch
-import torch.utils.data
-import torchvision
-
-from torch.utils.data import Dataset
-from pycocotools.coco import COCO
-from pycocotools import mask as coco_mask
-
-from hotr.data.datasets import builtin_meta
-import hotr.data.transforms.transforms as T
-
-class VCocoDetection(Dataset):
- def __init__(self,
- img_folder,
- ann_file,
- all_file,
- filter_empty_gt=True,
- transforms=None):
- self.img_folder = img_folder
- self.file_meta = dict()
- self._transforms = transforms
-
- self.ann_file = ann_file
- self.all_file = all_file
- self.filter_empty_gt = filter_empty_gt
-
- # COCO initialize
- self.coco = COCO(self.all_file)
- self.COCO_CLASSES = builtin_meta._get_coco_instances_meta()['coco_classes']
- self.file_meta['coco_classes'] = self.COCO_CLASSES
-
- # Load V-COCO Dataset
- self.vcoco_all = self.load_vcoco(self.ann_file)
-
- # Save COCO annotation data
- self.image_ids = sorted(list(set(self.vcoco_all[0]['image_id'].reshape(-1))))
-
- # Filter Data
- if filter_empty_gt:
- self.filter_image_id()
- self.img_infos = self.load_annotations()
-
- # Refine Data
- self.save_action_name()
- self.mapping_inst_action_to_action()
- self.load_subobj_classes()
- self.CLASSES = self.act_list
-
- ############################################################################
- # Load V-COCO Dataset
- ############################################################################
- def load_vcoco(self, dir_name=None):
- with open(dir_name, 'rt') as f:
- vsrl_data = json.load(f)
-
- for i in range(len(vsrl_data)):
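- # role_object_id is stored flat; reshape it to (num annotations, num roles),
- # and reshape ann_id / label / image_id into column vectors.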
- vsrl_data[i]['role_object_id'] = np.array(vsrl_data[i]['role_object_id']).reshape((len(vsrl_data[i]['role_name']),-1)).T
- for j in ['ann_id', 'label', 'image_id']:
- vsrl_data[i][j] = np.array(vsrl_data[i][j]).reshape((-1,1))
-
- return vsrl_data
-
- ############################################################################
- # Refine Data
- ############################################################################
- def save_action_name(self):
- self.inst_act_list = list()
- self.act_list = list()
-
- # add instance action human classes
- self.num_subject_act = 0
- for vcoco in self.vcoco_all:
- self.inst_act_list.append('human_' + vcoco['action_name'])
- self.num_subject_act += 1
-
- # add instance action object classes
- for vcoco in self.vcoco_all:
- if len(vcoco['role_name']) == 3:
- self.inst_act_list.append('object_' + vcoco['action_name']+'_'+vcoco['role_name'][1])
- self.inst_act_list.append('object_' + vcoco['action_name']+'_'+vcoco['role_name'][2])
- elif len(vcoco['role_name']) < 2:
- continue
- else:
- self.inst_act_list.append('object_' + vcoco['action_name']+'_'+vcoco['role_name'][-1]) # when only two roles
-
- # add action classes
- for vcoco in self.vcoco_all:
- if len(vcoco['role_name']) == 3:
- self.act_list.append(vcoco['action_name']+'_'+vcoco['role_name'][1])
- self.act_list.append(vcoco['action_name']+'_'+vcoco['role_name'][2])
- else:
- self.act_list.append(vcoco['action_name']+'_'+vcoco['role_name'][-1])
-
- # add to meta
- self.file_meta['action_classes'] = self.act_list
-
- def mapping_inst_action_to_action(self):
- sub_idx = 0
- obj_idx = self.num_subject_act
-
- self.sub_label_to_action = list()
- self.obj_label_to_action = list()
-
- for vcoco in self.vcoco_all:
- role_name = vcoco['role_name']
-
- self.sub_label_to_action.append(sub_idx)
- if len(role_name) == 3 :
- self.sub_label_to_action.append(sub_idx)
- self.obj_label_to_action.append(obj_idx)
- self.obj_label_to_action.append(obj_idx+1)
- obj_idx += 2
- elif len(role_name) == 2:
- self.obj_label_to_action.append(obj_idx)
- obj_idx += 1
- else:
- self.obj_label_to_action.append(0)
-
- sub_idx += 1
-
- def load_subobj_classes(self):
- self.vcoco_labels = dict()
- for img in self.image_ids:
- self.vcoco_labels[img] = dict()
- self.vcoco_labels[img]['boxes'] = np.empty((0, 4), dtype=np.float32)
- self.vcoco_labels[img]['categories'] = np.empty((0), dtype=np.int32)
-
- ann_ids = self.coco.getAnnIds(imgIds=img, iscrowd=None)
- objs = self.coco.loadAnns(ann_ids)
-
- valid_ann_ids = []
-
- for i, obj in enumerate(objs):
- if 'ignore' in obj and obj['ignore'] == 1: continue
-
- x1 = obj['bbox'][0]
- y1 = obj['bbox'][1]
- x2 = x1 + np.maximum(0., obj['bbox'][2] - 1.)
- y2 = y1 + np.maximum(0., obj['bbox'][3] - 1.)
-
- if obj['area'] > 0 and x2 > x1 and y2 > y1:
- bbox = np.array([x1, y1, x2, y2]).reshape(1, -1)
- cls = obj['category_id']
- self.vcoco_labels[img]['boxes'] = np.concatenate([self.vcoco_labels[img]['boxes'], bbox], axis=0)
- self.vcoco_labels[img]['categories'] = np.concatenate([self.vcoco_labels[img]['categories'], [cls]], axis=0)
-
- valid_ann_ids.append(ann_ids[i])
-
- num_valid_objs = len(valid_ann_ids)
-
- self.vcoco_labels[img]['agent_actions'] = -np.ones((num_valid_objs, self.num_action()), dtype=np.int32)
- self.vcoco_labels[img]['obj_actions'] = np.zeros((num_valid_objs, self.num_action()), dtype=np.int32)
- self.vcoco_labels[img]['role_id'] = -np.ones((num_valid_objs, self.num_action()), dtype=np.int32)
-
- for ix, ann_id in enumerate(valid_ann_ids):
- in_vcoco = np.where(self.vcoco_all[0]['ann_id'] == ann_id)[0]
- if in_vcoco.size > 0:
- self.vcoco_labels[img]['agent_actions'][ix, :] = 0
-
- agent_act_id = 0
- obj_act_id = -1
- for i, x in enumerate(self.vcoco_all):
- has_label = np.where(np.logical_and(x['ann_id'] == ann_id, x['label'] == 1))[0]
- if has_label.size > 0:
- assert has_label.size == 1
- rids = x['role_object_id'][has_label]
-
- if rids.shape[1] == 3:
- self.vcoco_labels[img]['agent_actions'][ix, agent_act_id] = 1
- self.vcoco_labels[img]['agent_actions'][ix, agent_act_id+1] = 1
- agent_act_id += 2
- else:
- self.vcoco_labels[img]['agent_actions'][ix, agent_act_id] = 1
- agent_act_id += 1
- if rids.shape[1] == 1 : obj_act_id += 1
-
- for j in range(1, rids.shape[1]):
- obj_act_id += 1
- if rids[0, j] == 0: continue # no role
- aid = np.where(valid_ann_ids == rids[0, j])[0]
-
- self.vcoco_labels[img]['role_id'][ix, obj_act_id] = aid
- self.vcoco_labels[img]['obj_actions'][aid, obj_act_id] = 1
-
- else:
- rids = x['role_object_id'][0]
- if rids.shape[0] == 3:
- agent_act_id += 2
- obj_act_id += 2
- else:
- agent_act_id += 1
- obj_act_id += 1
-
- ############################################################################
- # Annotation Loader
- ############################################################################
- # >>> 1. instance
- def load_instance_annotations(self, image_index):
- num_ann = self.vcoco_labels[image_index]['boxes'].shape[0]
- inst_action = np.zeros((num_ann, self.num_inst_action()), int)
- inst_bbox = np.zeros((num_ann, 4), dtype=np.float32)
- inst_category = np.zeros((num_ann, ), dtype=int)
-
- for idx in range(num_ann):
- inst_bbox[idx] = self.vcoco_labels[image_index]['boxes'][idx]
- inst_category[idx]= self.vcoco_labels[image_index]['categories'][idx] #+ 1 # category 1 ~ 81
-
- if inst_category[idx] == 1:
- act = self.vcoco_labels[image_index]['agent_actions'][idx]
- inst_action[idx, :self.num_subject_act] = act[np.unique(self.sub_label_to_action, return_index=True)[1]]
-
- # when person is the obj
- act = self.vcoco_labels[image_index]['obj_actions'][idx] # when person is the obj
- if act.any():
- inst_action[idx, self.num_subject_act:] = act[np.nonzero(self.obj_label_to_action)[0]]
- if inst_action[idx, :self.num_subject_act].sum(axis=-1) < 0:
- inst_action[idx, :self.num_subject_act] = 0
- else:
- act = self.vcoco_labels[image_index]['obj_actions'][idx]
- inst_action[idx, self.num_subject_act:] = act[np.nonzero(self.obj_label_to_action)[0]]
-
- # >>> For Objects that are in COCO but not in V-COCO,
- # >>> Human -> [-1 * 26, 0 * 25]
- # >>> Object -> [0 * 51]
- # >>> Don't return anything for actions with max 0 or max -1
- max_val = inst_action.max(axis=1)
- if (max_val > 0).sum() == 0:
- print(f"No Annotations for {image_index}")
- print(inst_action)
- print(self.vcoco_labels[image_index]['agent_actions'][idx])
- print(self.vcoco_labels[image_index]['obj_actions'][idx])
-
- return inst_bbox[max_val > 0], inst_category[max_val > 0], inst_action[max_val > 0]
-
- # >>> 2. pair
- def load_pair_annotations(self, image_index):
- num_ann = self.vcoco_labels[image_index]['boxes'].shape[0]
- pair_action = np.zeros((0, self.num_action()), int)
- pair_bbox = np.zeros((0, 8), dtype=np.float32)
- pair_target = np.zeros((0, ), dtype=int)
-
- for idx in range(num_ann):
- h_box = self.vcoco_labels[image_index]['boxes'][idx]
- h_cat = self.vcoco_labels[image_index]['categories'][idx]
- if h_cat != 1 : continue # human_id = 1
-
- h_act = self.vcoco_labels[image_index]['agent_actions'][idx]
- if np.any((h_act==-1)) : continue
-
- o_act = dict()
- for aid in range(self.num_action()):
- if h_act[aid] == 0 : continue
- o_id = self.vcoco_labels[image_index]['role_id'][idx, aid]
- if o_id not in o_act : o_act[o_id] = list()
- o_act[o_id].append(aid)
-
- for o_id in o_act.keys():
- if o_id == -1:
- o_box = -np.ones((4, ))
- o_cat = -1 # target is background
- else:
- o_box = self.vcoco_labels[image_index]['boxes'][o_id]
- o_cat = self.vcoco_labels[image_index]['categories'][o_id] # category 0 ~ 80
-
- box = np.concatenate([h_box, o_box]).astype(np.float32)
- act = np.zeros((1, self.num_action()), int)
- tar = np.zeros((1, ), int)
- tar[0] = o_cat #+ 1 # category 1 ~ 81
- for o_aid in o_act[o_id] : act[0, o_aid] = 1
-
- pair_action = np.concatenate([pair_action, act], axis=0)
- pair_bbox = np.concatenate([pair_bbox, np.expand_dims(box, axis=0)], axis=0)
- pair_target = np.concatenate([pair_target, tar], axis=0)
-
- return pair_bbox, pair_action, pair_target
-
- # >>> 3. image infos
- def load_annotations(self):
- img_infos = []
- for i in self.image_ids:
- info = self.coco.loadImgs([i])[0]
- img_infos.append(info)
- return img_infos
-
- ############################################################################
- # Check Method
- ############################################################################
- def sum_action_ann_for_id(self, find_idx):
- sum = 0
- for action_ann in self.vcoco_all:
- img_ids = action_ann['image_id']
- img_labels = action_ann['label']
-
- final_inds = img_ids[img_labels == 1]
-
- if (find_idx in final_inds):
- sum += 1
- # sum of class-wise existence
- return (sum > 0)
-
- def filter_image_id(self):
- empty_gt_list = []
- for img_id in self.image_ids:
- if not self.sum_action_ann_for_id(img_id):
- empty_gt_list.append(img_id)
-
- for remove_id in empty_gt_list:
- rm_idx = self.image_ids.index(remove_id)
- self.image_ids.remove(remove_id)
-
- ############################################################################
- # Preprocessing
- ############################################################################
- def prepare_img(self, idx):
- img_info = self.img_infos[idx]
- image = Image.open(os.path.join(self.img_folder, img_info['file_name'])).convert('RGB')
- target = self.get_ann_info(idx)
-
- w, h = image.size
- target["orig_size"] = torch.as_tensor([int(h), int(w)])
- target["size"] = torch.as_tensor([int(h), int(w)])
-
- if self._transforms is not None:
- img, target = self._transforms(image, target) # "size" gets converted here
-
- return img, target
-
- ############################################################################
- # Get Method
- ############################################################################
- def __getitem__(self, idx):
- img, target = self.prepare_img(idx)
- return img, target
-
- def __len__(self):
- return len(self.image_ids)
-
- def get_human_label_idx(self):
- return self.sub_label_to_action
-
- def get_object_label_idx(self):
- return self.obj_label_to_action
-
- def get_image_ids(self):
- return self.image_ids
-
- def get_categories(self):
- return self.COCO_CLASSES
-
- def get_inst_action(self):
- return self.inst_act_list
-
- def get_actions(self):
- return self.act_list
-
- def get_human_action(self):
- return self.inst_act_list[:self.num_subject_act]
-
- def get_object_action(self):
- return self.inst_act_list[self.num_subject_act:]
-
- def get_ann_info(self, idx):
- img_idx = int(self.image_ids[idx])
-
- # load each annotation
- inst_bbox, inst_label, inst_actions = self.load_instance_annotations(img_idx)
- pair_bbox, pair_actions, pair_targets = self.load_pair_annotations(img_idx)
-
- sample = {
- 'image_id' : torch.tensor([img_idx]),
- 'boxes': torch.as_tensor(inst_bbox, dtype=torch.float32),
- 'labels': torch.tensor(inst_label, dtype=torch.int64),
- 'inst_actions': torch.tensor(inst_actions, dtype=torch.int64),
- 'pair_boxes': torch.as_tensor(pair_bbox, dtype=torch.float32),
- 'pair_actions': torch.tensor(pair_actions, dtype=torch.int64),
- 'pair_targets': torch.tensor(pair_targets, dtype=torch.int64),
- }
-
- return sample
-
- ############################################################################
- # Number Method
- ############################################################################
- def num_category(self):
- return len(self.COCO_CLASSES)
-
- def num_action(self):
- return len(self.act_list)
-
- def num_inst_action(self):
- return len(self.inst_act_list)
-
- def num_human_act(self):
- return len(self.inst_act_list[:self.num_subject_act])
-
- def num_object_act(self):
- return len(self.inst_act_list[self.num_subject_act:])
-
-def make_hoi_transforms(image_set):
- normalize = T.Compose([
- T.ToTensor(),
- T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
- ])
-
- scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
-
- if image_set == 'train':
- return T.Compose([
- T.RandomHorizontalFlip(),
- T.ColorJitter(.4, .4, .4),
- T.RandomSelect(
- T.RandomResize(scales, max_size=1333),
- T.Compose([
- T.RandomResize([400, 500, 600]),
- T.RandomSizeCrop(384, 600),
- T.RandomResize(scales, max_size=1333),
- ])
- ),
- normalize,
- ])
-
- if image_set == 'val':
- return T.Compose([
- T.RandomResize([800], max_size=1333),
- normalize,
- ])
-
- if image_set == 'test':
- return T.Compose([
- T.RandomResize([800], max_size=1333),
- normalize,
- ])
-
- raise ValueError(f'unknown {image_set}')
-
-def build(image_set, args):
- root = Path(args.data_path)
- assert root.exists(), f'provided V-COCO path {root} does not exist'
- PATHS = {
- "train": (root / "coco/images/train2014/", root / "data/vcoco" / 'vcoco_trainval.json'),
- "val": (root / "coco/images/val2014", root / "data/vcoco" / 'vcoco_test.json'),
- "test": (root / "coco/images/val2014", root / "data/vcoco" / 'vcoco_test.json'),
- }
-
- img_folder, ann_file = PATHS[image_set]
- all_file = root / "data/instances_vcoco_all_2014.json"
- dataset = VCocoDetection(
- img_folder = img_folder,
- ann_file = ann_file,
- all_file = all_file,
- filter_empty_gt=True,
- transforms = make_hoi_transforms(image_set)
- )
- dataset.file_meta['dataset_file'] = args.dataset_file
- dataset.file_meta['image_set'] = image_set
-
- return dataset
diff --git a/spaces/MWilinski/bot/tests/index/test_index.py b/spaces/MWilinski/bot/tests/index/test_index.py
deleted file mode 100644
index 6b51bcf6b654f87d340b9f0800ee85b7d1b659d2..0000000000000000000000000000000000000000
--- a/spaces/MWilinski/bot/tests/index/test_index.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import pytest
-from typing import Any
-from huggingface_hub import snapshot_download
-from langchain.embeddings import HuggingFaceInstructEmbeddings
-from langchain.vectorstores import FAISS
-
-
-snapshot_download(
- repo_id='KonradSzafer/index',
- allow_patterns=['*.faiss', '*.pkl'],
- repo_type='dataset',
- local_dir='index/'
-)
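-# fetch the prebuilt FAISS index files (*.faiss, *.pkl) once, before the fixtures below load them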
-
-@pytest.fixture(scope="module")
-def embedding_model() -> HuggingFaceInstructEmbeddings:
- model_name = "hkunlp/instructor-large"
- embed_instruction = "Represent the Hugging Face library documentation"
- query_instruction = "Query the most relevant piece of information from the Hugging Face documentation"
- return HuggingFaceInstructEmbeddings(
- model_name=model_name,
- embed_instruction=embed_instruction,
- query_instruction=query_instruction,
- )
-
-@pytest.fixture(scope="module")
-def index_path() -> str:
- return "index/"
-
-@pytest.fixture(scope="module")
-def index(embedding_model: HuggingFaceInstructEmbeddings, index_path: str):
- return FAISS.load_local(index_path, embedding_model)
-
-@pytest.fixture(scope="module")
-def query() -> str:
- return "How to use the tokenizer?"
-
-def test_load_index(embedding_model: HuggingFaceInstructEmbeddings, index_path: str):
- index = FAISS.load_local(index_path, embedding_model)
- assert index is not None, "Failed to load index"
-
-def test_index_page_content(index, query: str):
- query_docs = index.similarity_search(query=query, k=3)
- assert isinstance(query_docs[0].page_content, str)
-
-def test_index_metadata(index, query):
- query_docs = index.similarity_search(query=query, k=3)
- assert isinstance(query_docs[0].metadata['source'], str)
diff --git a/spaces/MarcusSu1216/XingTong/modules/attentions.py b/spaces/MarcusSu1216/XingTong/modules/attentions.py
deleted file mode 100644
index f9c11ca4a3acb86bf1abc04d9dcfa82a4ed4061f..0000000000000000000000000000000000000000
--- a/spaces/MarcusSu1216/XingTong/modules/attentions.py
+++ /dev/null
@@ -1,349 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import modules.commons as commons
-import modules.modules as modules
-from modules.modules import LayerNorm
-
-
-class FFT(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0.,
- proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias,
- proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
- x = x * x_mask
- return x
-
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so as to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
-    # pad along column
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
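
For reference, the proximal bias above is just a dense `[1, 1, t, t]` matrix holding `-log(1 + |i - j|)` for every pair of positions; a minimal standalone sketch (independent of this class):

```py
import torch

def proximal_bias(length):
    # pairwise distances |i - j| between all positions
    r = torch.arange(length, dtype=torch.float32)
    diff = r.unsqueeze(0) - r.unsqueeze(1)                      # [length, length]
    # 0 on the diagonal, increasingly negative away from it
    return -torch.log1p(diff.abs()).unsqueeze(0).unsqueeze(0)   # [1, 1, length, length]

bias = proximal_bias(4)
# row i holds -log(1 + |i - j|): 0.0 on the diagonal, then ~-0.69, ~-1.10, ~-1.39
print(bias[0, 0])
```
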
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
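
The two padding helpers above only differ in where the `kernel_size - 1` zeros go; a quick sketch of the effect for a kernel size of 3:

```py
import torch
import torch.nn.functional as F

x = torch.arange(1., 6.).view(1, 1, 5)      # [batch, channels, time]
kernel_size = 3

# "same" padding: split the zeros evenly around the sequence
same = F.pad(x, ((kernel_size - 1) // 2, kernel_size // 2))
# causal padding: put all zeros on the left so position t never sees t+1, t+2, ...
causal = F.pad(x, (kernel_size - 1, 0))

print(same)    # tensor([[[0., 1., 2., 3., 4., 5., 0.]]])
print(causal)  # tensor([[[0., 0., 1., 2., 3., 4., 5.]]])
```
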
diff --git a/spaces/Marshalls/testmtd/feature_extraction/madmom/utils/midi.py b/spaces/Marshalls/testmtd/feature_extraction/madmom/utils/midi.py
deleted file mode 100644
index c653c594ca7276f65d3cbdcb95402ee8a368f973..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/feature_extraction/madmom/utils/midi.py
+++ /dev/null
@@ -1,1914 +0,0 @@
-# encoding: utf-8
-# pylint: disable=no-member
-# pylint: disable=invalid-name
-# pylint: disable=too-many-arguments
-# pylint: disable=too-few-public-methods
-"""
-This module contains MIDI functionality, but is deprecated as of version 0.16.
-Please use madmom.io.midi instead. This module will be removed in version 0.18.
-
-Almost all code is taken from Giles Hall's python-midi package:
-https://github.com/vishnubob/python-midi
-
-It combines the complete package in a single file, to make it easier to
-distribute. The most notable changes are the `MIDITrack` and `MIDIFile` classes,
-which handle all data i/o and provide an interface that allows reading/displaying
-all notes as simple numpy arrays. Also, the EventRegistry is handled differently.
-
-The last merged commit is 3053fefe.
-
-Since then the following commits have been added functionality-wise:
-
-- 0964c0b (prevent multiple tick conversions)
-- c43bf37 (add pitch and value properties to AfterTouchEvent)
-- 40111c6 (add 0x08 MetaEvent: ProgramNameEvent)
-- 43de818 (handle unknown MIDI meta events gracefully)
-
-Additionally, the module has been updated to work with Python3.
-
-The MIT License (MIT)
-Copyright (c) 2013 Giles F. Hall
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import sys
-import math
-import struct
-import numpy as np
-import warnings
-
-
-# constants
-OCTAVE_MAX_VALUE = 12
-OCTAVE_VALUES = list(range(OCTAVE_MAX_VALUE))
-
-NOTE_NAMES = ['C', 'Cs', 'D', 'Ds', 'E', 'F', 'Fs', 'G', 'Gs', 'A', 'As', 'B']
-WHITE_KEYS = [0, 2, 4, 5, 7, 9, 11]
-BLACK_KEYS = [1, 3, 6, 8, 10]
-NOTE_PER_OCTAVE = len(NOTE_NAMES)
-NOTE_VALUES = list(range(OCTAVE_MAX_VALUE * NOTE_PER_OCTAVE))
-NOTE_NAME_MAP_FLAT = {}
-NOTE_VALUE_MAP_FLAT = []
-NOTE_NAME_MAP_SHARP = {}
-NOTE_VALUE_MAP_SHARP = []
-
-for index in range(128):
- note_idx = index % NOTE_PER_OCTAVE
-    oct_idx = index // OCTAVE_MAX_VALUE
- note_name = NOTE_NAMES[note_idx]
- if len(note_name) == 2:
- # sharp note
- flat = NOTE_NAMES[note_idx + 1] + 'b'
- NOTE_NAME_MAP_FLAT['%s_%d' % (flat, oct_idx)] = index
- NOTE_NAME_MAP_SHARP['%s_%d' % (note_name, oct_idx)] = index
- NOTE_VALUE_MAP_FLAT.append('%s_%d' % (flat, oct_idx))
- NOTE_VALUE_MAP_SHARP.append('%s_%d' % (note_name, oct_idx))
- globals()['%s_%d' % (note_name[0] + 's', oct_idx)] = index
- globals()['%s_%d' % (flat, oct_idx)] = index
- else:
- NOTE_NAME_MAP_FLAT['%s_%d' % (note_name, oct_idx)] = index
- NOTE_NAME_MAP_SHARP['%s_%d' % (note_name, oct_idx)] = index
- NOTE_VALUE_MAP_FLAT.append('%s_%d' % (note_name, oct_idx))
- NOTE_VALUE_MAP_SHARP.append('%s_%d' % (note_name, oct_idx))
- globals()['%s_%d' % (note_name, oct_idx)] = index
-
-BEAT_NAMES = ['whole', 'half', 'quarter', 'eighth', 'sixteenth',
- 'thirty-second', 'sixty-fourth']
-BEAT_VALUES = [4, 2, 1, .5, .25, .125, .0625]
-WHOLE = 0
-HALF = 1
-QUARTER = 2
-EIGHTH = 3
-SIXTEENTH = 4
-THIRTY_SECOND = 5
-SIXTY_FOURTH = 6
-
-HEADER_SIZE = 14
-
-RESOLUTION = 480 # ticks per quarter note
-TEMPO = 120
-TIME_SIGNATURE_NUMERATOR = 4
-TIME_SIGNATURE_DENOMINATOR = 4
-TIME_SIGNATURE = (TIME_SIGNATURE_NUMERATOR, TIME_SIGNATURE_DENOMINATOR)
-SECONDS_PER_QUARTER_NOTE = 60. / TEMPO
-SECONDS_PER_TICK = SECONDS_PER_QUARTER_NOTE / RESOLUTION
-
-warnings.warn('Deprecated as of version 0.16. Please use madmom.io.midi '
- 'instead. This module will be removed in version 0.18.')
-
-# Ensure Python2/3 compatibility when reading bytes from MIDI files
-if sys.version_info[0] == 2:
- int2byte = chr
-
- def byte2int(byte):
- """Convert a byte-character to an integer."""
- return ord(byte)
-else:
- int2byte = struct.Struct(">B").pack
-
- def byte2int(byte):
- """Convert a byte-character to an integer."""
- return byte
-
-
-# functions for packing / unpacking variable length data
-def read_variable_length(data):
- """
- Read a variable length variable from the given data.
-
- Parameters
- ----------
- data : bytearray
- Data of variable length.
-
- Returns
- -------
-    value : int
-        Decoded value of the variable-length quantity.
-
- """
- next_byte = 1
- value = 0
- while next_byte:
- next_value = byte2int(next(data))
- # is the hi-bit set?
- if not next_value & 0x80:
- # no next BYTE
- next_byte = 0
- # mask out the 8th bit
- next_value &= 0x7f
- # shift last value up 7 bits
- value <<= 7
- # add new value
- value += next_value
- return value
-
-
-def write_variable_length(value):
- """
- Write a variable length variable.
-
- Parameters
- ----------
-    value : int
-        Value to be encoded as a variable-length quantity.
-
- Returns
- -------
- bytearray
-        Encoded value as a variable-length quantity.
-
- """
- result = bytearray()
- result.insert(0, value & 0x7F)
- value >>= 7
- if value:
- result.insert(0, (value & 0x7F) | 0x80)
- value >>= 7
- if value:
- result.insert(0, (value & 0x7F) | 0x80)
- value >>= 7
- if value:
- result.insert(0, (value & 0x7F) | 0x80)
- return result
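
As a sanity check on the two helpers above, here is a small self-contained sketch of the same variable-length quantity scheme (the function names are illustrative, not part of this module):

```py
def encode_vlq(value):
    """Encode an int as a MIDI variable-length quantity (7 data bits per byte)."""
    out = bytearray([value & 0x7F])
    value >>= 7
    while value:
        out.insert(0, (value & 0x7F) | 0x80)  # set the continuation bit
        value >>= 7
    return out

def decode_vlq(data):
    """Decode a MIDI variable-length quantity from an iterable of bytes."""
    value = 0
    for byte in data:
        value = (value << 7) | (byte & 0x7F)
        if not byte & 0x80:  # continuation bit not set: last byte
            break
    return value

assert encode_vlq(200) == bytearray([0x81, 0x48])
assert decode_vlq(encode_vlq(200)) == 200
```
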
-
-
-# class for dynamically registering event classes
-class EventRegistry(object):
- """
- Class for registering Events.
-
- Event classes should be registered manually by calling
- EventRegistry.register_event(EventClass) after the class definition.
-
- Normal events are registered in the `events` dictionary and use the event's
- `status_msg` as a key; meta events are registered in the `meta_events`
- dictionary and use their `meta_command` as key.
-
- """
- events = {}
- meta_events = {}
-
- @classmethod
- def register_event(cls, event):
- """
- Registers an event in the registry.
-
- Parameters
- ----------
- event : :class:`Event` instance
- Event to be registered.
-
- """
- # normal events
- if any(b in (Event, ChannelEvent, NoteEvent) for b in event.__bases__):
- # raise an error if the event class is registered already
- if event.status_msg in cls.events:
- raise AssertionError("Event %s already registered" %
- event.name)
- # register the Event
- cls.events[event.status_msg] = event
- # meta events
- elif any(b in (MetaEvent, MetaEventWithText) for b in event.__bases__):
- # raise an error if the meta event class is registered already
- if event.meta_command in EventRegistry.meta_events:
- raise AssertionError("Event %s already registered" %
- event.name)
- # register the MetaEvent
- cls.meta_events[event.meta_command] = event
- # unknown events
- else:
- # raise an error
- raise AssertionError("Unknown base class in event type: %s" %
- event.__bases__)
-
-
-class Event(object):
- """
- Generic MIDI Event.
-
- """
- name = "Generic MIDI Event"
- length = 0
- status_msg = 0x0
- # sort is a float value used for sorting events occurring at the same tick
- sort = 0.
-
- def __init__(self, **kwargs):
- if isinstance(self.length, int):
- data = [0] * self.length
- else:
- data = []
- self.tick = 0
- self.data = data
- for key in kwargs:
- setattr(self, key, kwargs[key])
-
- def __eq__(self, other):
- return (
- self.tick == other.tick and self.data == other.data and
- self.status_msg == other.status_msg)
-
- def __ne__(self, other):
- return not self == other
-
- def __lt__(self, other):
- if self.tick < other.tick:
- return True
- elif self.tick == other.tick and self.sort < other.sort:
- return True
- return False
-
- def __le__(self, other):
-        return NotImplemented
-
- def __gt__(self, other):
- if self.tick > other.tick:
- return True
- elif self.tick == other.tick and self.sort > other.sort:
- return True
- return False
-
- def __ge__(self, other):
-        return NotImplemented
-
- def __str__(self):
- return "%s: tick: %s data: %s" % (
- self.__class__.__name__, self.tick, self.data)
-
-
-class ChannelEvent(Event):
- """
- Event with a channel number.
-
- """
- name = 'ChannelEvent'
-
- def __init__(self, **kwargs):
- super(ChannelEvent, self).__init__(**kwargs)
- self.channel = kwargs.get('channel', 0)
-
- def __eq__(self, other):
- return (
- self.tick == other.tick and self.channel == other.channel and
- self.data == other.data and self.status_msg == other.status_msg)
-
- def __str__(self):
- return "%s: tick: %s channel: %s data: %s" % (
- self.__class__.__name__, self.tick, self.channel, self.data)
-
-
-class NoteEvent(ChannelEvent):
- """
- NoteEvent is a special subclass of Event that is not meant to be used as a
- concrete class. It defines the generalities of NoteOn and NoteOff events.
-
- """
- length = 2
-
- def __str__(self):
- return "%s: tick: %s channel: %s pitch: %s velocity: %s" % (
- self.__class__.__name__, self.tick, self.channel, self.pitch,
- self.velocity)
-
- @property
- def pitch(self):
- """
- Pitch of the note event.
-
- """
- return self.data[0]
-
- @pitch.setter
- def pitch(self, pitch):
- """
- Set the pitch of the note event.
-
- Parameters
- ----------
- pitch : int
- Pitch of the note.
-
- """
- self.data[0] = pitch
-
- @property
- def velocity(self):
- """
- Velocity of the note event.
-
- """
- return self.data[1]
-
- @velocity.setter
- def velocity(self, velocity):
- """
- Set the velocity of the note event.
-
- Parameters
- ----------
- velocity : int
- Velocity of the note.
-
- """
- self.data[1] = velocity
-
-
-class NoteOnEvent(NoteEvent):
- """
- Note On Event.
-
- """
- status_msg = 0x90
- name = 'Note On'
- sort = .1 # make sure it is sorted before NoteOffEvent
-
-EventRegistry.register_event(NoteOnEvent)
-
-
-class NoteOffEvent(NoteEvent):
- """
- Note Off Event.
-
- """
- status_msg = 0x80
- name = 'Note Off'
- sort = .2 # make sure it is sorted after NoteOnEvent
-
-EventRegistry.register_event(NoteOffEvent)
-
-
-class AfterTouchEvent(ChannelEvent):
- """
- After Touch Event.
-
- """
- status_msg = 0xA0
- length = 2
- name = 'After Touch'
-
- def __str__(self):
- return "%s: tick: %s channel: %s pitch: %s value: %s" % (
- self.__class__.__name__, self.tick, self.channel, self.pitch,
- self.value)
-
- @property
- def pitch(self):
- """
- Pitch of the after touch event.
-
- """
- return self.data[0]
-
- @pitch.setter
- def pitch(self, pitch):
- """
- Set the pitch of the after touch event.
-
- Parameters
- ----------
- pitch : int
- Pitch of the after touch event.
-
- """
- self.data[0] = pitch
-
- @property
- def value(self):
- """
- Value of the after touch event.
-
- """
- return self.data[1]
-
- @value.setter
- def value(self, value):
- """
- Set the value of the after touch event.
-
- Parameters
- ----------
- value : int
- Value of the after touch event.
-
- """
- self.data[1] = value
-
-EventRegistry.register_event(AfterTouchEvent)
-
-
-class ControlChangeEvent(ChannelEvent):
- """
- Control Change Event.
-
- """
- status_msg = 0xB0
- length = 2
- name = 'Control Change'
-
- def __str__(self):
- return "%s: tick: %s channel: %s control: %s value: %s" % (
- self.__class__.__name__, self.tick, self.channel, self.control,
- self.value)
-
- @property
- def control(self):
- """
- Control ID.
-
- """
- return self.data[0]
-
- @control.setter
- def control(self, control):
- """
- Set control ID.
-
- Parameters
- ----------
- control : int
- Control ID.
-
- """
- self.data[0] = control
-
- @property
- def value(self):
- """
- Value of the controller.
-
- """
- return self.data[1]
-
- @value.setter
- def value(self, value):
- """
- Set the value of the controller.
-
- Parameters
- ----------
- value : int
- Value of the controller.
-
- """
- self.data[1] = value
-
-EventRegistry.register_event(ControlChangeEvent)
-
-
-class ProgramChangeEvent(ChannelEvent):
- """
- Program Change Event.
-
- """
- status_msg = 0xC0
- length = 1
- name = 'Program Change'
-
- def __str__(self):
- return "%s: tick: %s channel: %s value: %s" % (
- self.__class__.__name__, self.tick, self.channel, self.value)
-
- @property
- def value(self):
- """
- Value of the Program Change Event.
-
- """
- return self.data[0]
-
- @value.setter
- def value(self, value):
- """
- Set the value of the Program Change Event.
-
- Parameters
- ----------
- value : int
- Value of the Program Change Event.
-
- """
- self.data[0] = value
-
-EventRegistry.register_event(ProgramChangeEvent)
-
-
-class ChannelAfterTouchEvent(ChannelEvent):
- """
- Channel After Touch Event.
-
- """
- status_msg = 0xD0
- length = 1
- name = 'Channel After Touch'
-
- def __str__(self):
- return "%s: tick: %s channel: %s value: %s" % (
- self.__class__.__name__, self.tick, self.channel, self.value)
-
- @property
- def value(self):
- """
- Value of the Channel After Touch Event.
-
- """
- return self.data[0]
-
- @value.setter
- def value(self, value):
- """
- Set the value of the Channel After Touch Event.
-
- Parameters
- ----------
- value : int
- Value of the Channel After Touch Event.
-
- """
- self.data[0] = value
-
-EventRegistry.register_event(ChannelAfterTouchEvent)
-
-
-class PitchWheelEvent(ChannelEvent):
- """
- Pitch Wheel Event.
-
- """
- status_msg = 0xE0
- length = 2
- name = 'Pitch Wheel'
-
- @property
- def pitch(self):
- """
- Pitch of the Pitch Wheel Event.
-
- """
- return ((self.data[1] << 7) | self.data[0]) - 0x2000
-
- @pitch.setter
- def pitch(self, pitch):
- """
- Set the pitch of the Pitch Wheel Event.
-
- Parameters
- ----------
- pitch : int
- Pitch of the Pitch Wheel Event.
-
- """
- value = pitch + 0x2000
- self.data[0] = value & 0x7F
- self.data[1] = (value >> 7) & 0x7F
-
-EventRegistry.register_event(PitchWheelEvent)
-
-
-class SysExEvent(Event):
- """
- System Exclusive Event.
-
- """
- status_msg = 0xF0
- length = 'variable'
- name = 'SysEx'
-
-EventRegistry.register_event(SysExEvent)
-
-
-class MetaEvent(Event):
- """
- MetaEvent is a special subclass of Event that is not meant to be used as a
- concrete class. It defines a subset of Events known as the Meta events.
-
- """
- status_msg = 0xFF
- meta_command = 0x0
- name = 'Meta Event'
-
- def __eq__(self, other):
- return (
- self.tick == other.tick and self.data == other.data and
- self.status_msg == other.status_msg and
- self.meta_command == other.meta_command)
-
-
-class MetaEventWithText(MetaEvent):
- """
- Meta Event With Text.
-
- """
- def __init__(self, **kwargs):
- super(MetaEventWithText, self).__init__(**kwargs)
- if 'text' not in kwargs:
- self.text = ''.join(chr(datum) for datum in self.data)
-
- def __str__(self):
- return "%s: %s" % (self.__class__.__name__, self.text)
-
-
-class SequenceNumberMetaEvent(MetaEvent):
- """
- Sequence Number Meta Event.
-
- """
- meta_command = 0x00
- length = 2
- name = 'Sequence Number'
-
-EventRegistry.register_event(SequenceNumberMetaEvent)
-
-
-class TextMetaEvent(MetaEventWithText):
- """
- Text Meta Event.
-
- """
- meta_command = 0x01
- length = 'variable'
- name = 'Text'
-
-EventRegistry.register_event(TextMetaEvent)
-
-
-class CopyrightMetaEvent(MetaEventWithText):
- """
- Copyright Meta Event.
-
- """
- meta_command = 0x02
- length = 'variable'
- name = 'Copyright Notice'
-
-EventRegistry.register_event(CopyrightMetaEvent)
-
-
-class TrackNameEvent(MetaEventWithText):
- """
- Track Name Event.
-
- """
- meta_command = 0x03
- length = 'variable'
- name = 'Track Name'
-
-EventRegistry.register_event(TrackNameEvent)
-
-
-class InstrumentNameEvent(MetaEventWithText):
- """
- Instrument Name Event.
-
- """
- meta_command = 0x04
- length = 'variable'
- name = 'Instrument Name'
-
-EventRegistry.register_event(InstrumentNameEvent)
-
-
-class LyricsEvent(MetaEventWithText):
- """
- Lyrics Event.
-
- """
- meta_command = 0x05
- length = 'variable'
- name = 'Lyrics'
-
-EventRegistry.register_event(LyricsEvent)
-
-
-class MarkerEvent(MetaEventWithText):
- """
- Marker Event.
-
- """
- meta_command = 0x06
- length = 'variable'
- name = 'Marker'
-
-EventRegistry.register_event(MarkerEvent)
-
-
-class CuePointEvent(MetaEventWithText):
- """
- Cue Point Event.
-
- """
- meta_command = 0x07
- length = 'variable'
- name = 'Cue Point'
-
-EventRegistry.register_event(CuePointEvent)
-
-
-class ProgramNameEvent(MetaEventWithText):
- """
- Program Name Event.
-
- """
- meta_command = 0x08
- length = 'variable'
- name = 'Program Name'
-
-EventRegistry.register_event(ProgramNameEvent)
-
-
-class UnknownMetaEvent(MetaEvent):
- """
- Unknown Meta Event.
-
- Parameters
- ----------
- meta_command : int
- Value of the meta command.
-
- """
- meta_command = None
- name = 'Unknown'
-
- def __init__(self, **kwargs):
- super(UnknownMetaEvent, self).__init__(**kwargs)
- # TODO: is this needed, should be handled by Event already
- self.meta_command = kwargs['meta_command']
-
-EventRegistry.register_event(UnknownMetaEvent)
-
-
-class ChannelPrefixEvent(MetaEvent):
- """
- Channel Prefix Event.
-
- """
- meta_command = 0x20
- length = 1
- name = 'Channel Prefix'
-
-EventRegistry.register_event(ChannelPrefixEvent)
-
-
-class PortEvent(MetaEvent):
- """
- Port Event.
-
- """
- meta_command = 0x21
- name = 'MIDI Port/Cable'
-
-EventRegistry.register_event(PortEvent)
-
-
-class TrackLoopEvent(MetaEvent):
- """
- Track Loop Event.
-
- """
- meta_command = 0x2E
- name = 'Track Loop'
-
-EventRegistry.register_event(TrackLoopEvent)
-
-
-class EndOfTrackEvent(MetaEvent):
- """
- End Of Track Event.
-
- """
- meta_command = 0x2F
- name = 'End of Track'
- sort = .99 # should always come last
-
-EventRegistry.register_event(EndOfTrackEvent)
-
-
-class SetTempoEvent(MetaEvent):
- """
- Set Tempo Event.
-
- """
- meta_command = 0x51
- length = 3
- name = 'Set Tempo'
-
- def __str__(self):
- return "%s: tick: %s microseconds per quarter note: %s" % (
- self.__class__.__name__, self.tick,
- self.microseconds_per_quarter_note)
-
- @property
- def microseconds_per_quarter_note(self):
- """
- Microseconds per quarter note.
-
- """
- assert len(self.data) == 3
- values = [self.data[x] << (16 - (8 * x)) for x in range(3)]
- return sum(values)
-
- @microseconds_per_quarter_note.setter
- def microseconds_per_quarter_note(self, microseconds):
- """
- Set microseconds per quarter note.
-
- Parameters
- ----------
- microseconds : int
- Microseconds per quarter note.
-
- """
- self.data = [(microseconds >> (16 - (8 * x)) & 0xFF) for x in range(3)]
-
-EventRegistry.register_event(SetTempoEvent)
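
The three data bytes of a Set Tempo event are simply the microseconds per quarter note in big-endian order; at the default 120 bpm this works out as follows (a sketch assuming the deprecated `madmom.utils.midi` module shown here is importable):

```py
from madmom.utils.midi import SetTempoEvent  # deprecated module shown above

tempo = SetTempoEvent(tick=0)
tempo.microseconds_per_quarter_note = int(60e6 / 120)  # 500000 us per quarter note
print(tempo.data)                                      # [7, 161, 32] == 0x07 0xA1 0x20
print(tempo.microseconds_per_quarter_note)             # 500000
```
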
-
-
-class SmpteOffsetEvent(MetaEvent):
- """
- SMPTE Offset Event.
-
- """
- meta_command = 0x54
- name = 'SMPTE Offset'
-
-EventRegistry.register_event(SmpteOffsetEvent)
-
-
-class TimeSignatureEvent(MetaEvent):
- """
- Time Signature Event.
-
- """
- meta_command = 0x58
- length = 4
- name = 'Time Signature'
-
- @property
- def numerator(self):
- """
- Numerator of the time signature.
-
- """
- return self.data[0]
-
- @numerator.setter
- def numerator(self, numerator):
- """
- Set numerator of the time signature.
-
- Parameters
- ----------
- numerator : int
- Numerator of the time signature.
- """
- self.data[0] = numerator
-
- @property
- def denominator(self):
- """
- Denominator of the time signature.
-
- """
- return 2 ** self.data[1]
-
- @denominator.setter
- def denominator(self, denominator):
- """
- Set denominator of the time signature.
-
- Parameters
- ----------
- denominator : int
- Denominator of the time signature.
-
- """
- self.data[1] = int(math.log(denominator, 2))
-
- @property
- def metronome(self):
- """
- Metronome.
-
- """
- return self.data[2]
-
- @metronome.setter
- def metronome(self, metronome):
- """
- Set metronome of the time signature.
-
- Parameters
- ----------
- metronome : int
- Metronome of the time signature.
-
- """
- self.data[2] = metronome
-
- @property
- def thirty_seconds(self):
- """
- Thirty-seconds of the time signature.
-
- """
- return self.data[3]
-
- @thirty_seconds.setter
- def thirty_seconds(self, thirty_seconds):
- """
- Set thirty-seconds of the time signature.
-
- Parameters
- ----------
- thirty_seconds : int
- Thirty-seconds of the time signature.
-
- """
- self.data[3] = thirty_seconds
-
-EventRegistry.register_event(TimeSignatureEvent)
-
-
-class KeySignatureEvent(MetaEvent):
- """
- Key Signature Event.
-
- """
- meta_command = 0x59
- length = 2
- name = 'Key Signature'
-
- @property
- def alternatives(self):
- """
- Alternatives of the key signature.
-
- """
- return self.data[0] - 256 if self.data[0] > 127 else self.data[0]
-
- @alternatives.setter
- def alternatives(self, alternatives):
- """
- Set alternatives of the key signature.
-
- Parameters
- ----------
- alternatives : int
- Alternatives of the key signature.
-
- """
- self.data[0] = 256 + alternatives if alternatives < 0 else alternatives
-
- @property
- def minor(self):
- """
- Major / minor.
-
- """
- return self.data[1]
-
- @minor.setter
- def minor(self, val):
- """
- Set major / minor.
-
- Parameters
- ----------
- val : int
- Major / minor.
-
- """
- self.data[1] = val
-
-EventRegistry.register_event(KeySignatureEvent)
-
-
-class SequencerSpecificEvent(MetaEvent):
- """
- Sequencer Specific Event.
-
- """
- meta_command = 0x7F
- name = 'Sequencer Specific'
-
-EventRegistry.register_event(SequencerSpecificEvent)
-
-
-def _add_channel(notes, channel=0):
- """
- Adds a default channel to the notes if missing.
-
- Parameters
- ----------
- notes : numpy array, shape (num_notes, 2)
- Notes, one per row (column definition see notes).
- channel : int, optional
- Note channel if not defined by `notes`.
-
- Returns
- -------
- numpy array
- Notes (including note channel).
-
- Notes
- -----
- The note columns format must be (channel being optional):
-
- 'onset' 'pitch' 'duration' 'velocity' ['channel']
-
- """
- if not notes.ndim == 2:
- raise ValueError('unknown format for `notes`')
- rows, columns = notes.shape
- if columns == 5:
- return notes
- elif columns == 4:
- channels = np.ones((rows, 1)) * channel
- return np.hstack((notes, channels))
- raise ValueError('unable to handle `notes` with %d columns' % columns)
-
-
-# MIDI Track
-class MIDITrack(object):
- """
- MIDI Track.
-
- Parameters
- ----------
- events : list
- MIDI events.
-
- Notes
- -----
- All events are stored with timing information in absolute ticks.
-    The events must be sorted. Consider using the `from_notes()` method.
-
- Examples
- --------
-
- Create a MIDI track from a list of events. Please note that the events must
- be sorted.
-
- >>> e1 = NoteOnEvent(tick=100, pitch=50, velocity=60)
- >>> e2 = NoteOffEvent(tick=300, pitch=50)
- >>> e3 = NoteOnEvent(tick=200, pitch=62, velocity=90)
- >>> e4 = NoteOffEvent(tick=600, pitch=62)
- >>> t = MIDITrack(sorted([e1, e2, e3, e4]))
- >>> t # doctest: +ELLIPSIS
-    <madmom.utils.midi.MIDITrack object at 0x...>
- >>> t.events # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
-    [<madmom.utils.midi.NoteOnEvent object at 0x...>,
-     <madmom.utils.midi.NoteOnEvent object at 0x...>,
-     <madmom.utils.midi.NoteOffEvent object at 0x...>,
-     <madmom.utils.midi.NoteOffEvent object at 0x...>]
-
- It can also be created from an array containing the notes. The `from_notes`
- method also takes care of creating tempo and time signature events.
-
- >>> notes = np.array([[0.1, 50, 0.3, 60], [0.2, 62, 0.4, 90]])
- >>> t = MIDITrack.from_notes(notes)
- >>> t # doctest: +ELLIPSIS
-    <madmom.utils.midi.MIDITrack object at 0x...>
- >>> t.events # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
-    [<madmom.utils.midi.SetTempoEvent object at 0x...>,
-     <madmom.utils.midi.TimeSignatureEvent object at 0x...>,
-     <madmom.utils.midi.NoteOnEvent object at 0x...>,
-     <madmom.utils.midi.NoteOnEvent object at 0x...>,
-     <madmom.utils.midi.NoteOffEvent object at 0x...>,
-     <madmom.utils.midi.NoteOffEvent object at 0x...>]
-
- """
-
- def __init__(self, events=None):
- if events is None:
- self.events = []
- else:
- # do not sort the events, since they can have relative timing!
- self.events = events
-
- def _make_ticks_abs(self):
- """Make the track's events timing information absolute."""
- running_tick = 0
- for event in self.events:
- event.tick += running_tick
- running_tick = event.tick
-
- def _make_ticks_rel(self):
- """Make the track's events timing information relative."""
- running_tick = 0
- for event in self.events:
- event.tick -= running_tick
- running_tick += event.tick
-
- @property
- def data_stream(self):
- """
- MIDI data stream representation of the track.
-
- """
- # sort the events
- self.events.sort()
- # first make sure the timing information is relative
- self._make_ticks_rel()
- # and unset the status message
- status = None
- # then encode all events of the track
- track_data = bytearray()
- for event in self.events:
- # encode the event data, first the timing information
- track_data.extend(write_variable_length(event.tick))
- # is the event a MetaEvent?
- if isinstance(event, MetaEvent):
- track_data.append(event.status_msg)
- track_data.append(event.meta_command)
- track_data.extend(write_variable_length(len(event.data)))
- track_data.extend(event.data)
- # is this event a SysEx Event?
- elif isinstance(event, SysExEvent):
- track_data.append(0xF0)
- track_data.extend(event.data)
- track_data.append(0xF7)
- # not a meta or SysEx event, must be a general message
- elif isinstance(event, Event):
- if not status or status.status_msg != event.status_msg or \
- status.channel != event.channel:
- status = event
- track_data.append(event.status_msg | event.channel)
- track_data.extend(event.data)
- else:
- raise ValueError("Unknown MIDI Event: " + str(event))
- # TODO: should we add a EndOfTrackEvent?
- # convert events back to absolute ticks
- self._make_ticks_abs()
- # prepare the data
- data = bytearray()
- # generate a MIDI header
- data.extend(b'MTrk')
- data.extend(struct.pack(">L", len(track_data)))
- # append the track data
- data.extend(track_data)
- # return the track data
- return data
-
- @classmethod
- def from_stream(cls, midi_stream):
- """
- Create a MIDI track by reading the data from a stream.
-
- Parameters
- ----------
- midi_stream : open file handle
- MIDI file stream (e.g. open MIDI file handle)
-
- Returns
- -------
- :class:`MIDITrack` instance
- :class:`MIDITrack` instance
-
- """
- events = []
- # reset the status
- status = None
- # first four bytes are Track header
- chunk = midi_stream.read(4)
- if chunk != b'MTrk':
- raise TypeError("Bad track header in MIDI file: %s" % chunk)
- # next four bytes are track size
- track_size = struct.unpack(">L", midi_stream.read(4))[0]
- track_data = iter(midi_stream.read(track_size))
- # read in all events
- while True:
- try:
- # first datum is variable length representing the delta-time
- tick = read_variable_length(track_data)
- # next byte is status message
- status_msg = byte2int(next(track_data))
- # is the event a MetaEvent?
- if MetaEvent.status_msg == status_msg:
- meta_cmd = byte2int(next(track_data))
- if meta_cmd not in EventRegistry.meta_events:
- import warnings
- warnings.warn("Unknown Meta MIDI Event: %s" % meta_cmd)
- event_cls = UnknownMetaEvent
- else:
- event_cls = EventRegistry.meta_events[meta_cmd]
- data_len = read_variable_length(track_data)
- data = [byte2int(next(track_data)) for _ in
- range(data_len)]
- # create an event and append it to the list
- events.append(event_cls(tick=tick, data=data,
- meta_command=meta_cmd))
- # is this event a SysEx Event?
- elif SysExEvent.status_msg == status_msg:
- data = []
- while True:
- datum = byte2int(next(track_data))
- if datum == 0xF7:
- break
- data.append(datum)
- # create an event and append it to the list
- events.append(SysExEvent(tick=tick, data=data))
- # not a meta or SysEx event, must be a general MIDI event
- else:
- key = status_msg & 0xF0
- if key not in EventRegistry.events:
- assert status, "Bad byte value"
- data = []
- key = status & 0xF0
- event_cls = EventRegistry.events[key]
- channel = status & 0x0F
- data.append(status_msg)
- data += [byte2int(next(track_data)) for _ in
- range(event_cls.length - 1)]
- # create an event and append it to the list
- events.append(event_cls(tick=tick, channel=channel,
- data=data))
- else:
- status = status_msg
- event_cls = EventRegistry.events[key]
- channel = status & 0x0F
- data = [byte2int(next(track_data)) for _ in
- range(event_cls.length)]
- # create an event and append it to the list
- events.append(event_cls(tick=tick, channel=channel,
- data=data))
- # no more events to be processed
- except StopIteration:
- break
- # create a new track
- track = cls(events)
- # make the timing of the events (i.e. the ticks) absolute
- track._make_ticks_abs()
- # return this track
- return track
-
- @classmethod
- def from_notes(cls, notes, tempo=TEMPO, time_signature=TIME_SIGNATURE,
- resolution=RESOLUTION):
- """
- Create a MIDI track from the given notes.
-
- Parameters
- ----------
- notes : numpy array
- Array with the notes, one per row. The columns must be:
- (onset time, pitch, duration, velocity, [channel]).
- tempo : float, optional
- Tempo of the MIDI track, given in beats per minute (bpm).
- time_signature : tuple, optional
- Time signature of the track, e.g. (4, 4) for 4/4.
- resolution : int
- Resolution (i.e. ticks per quarter note) of the MIDI track.
-
- Returns
- -------
- :class:`MIDITrack` instance
- :class:`MIDITrack` instance
-
- Notes
- -----
-        All events, including the generated tempo and time signature events, are
-        included in the returned track (i.e. as defined in MIDI format 0).
-
- """
- # add a default channel if needed
- notes = _add_channel(notes)
-
- # set time signature
- sig = TimeSignatureEvent(tick=0)
- sig.numerator, sig.denominator = time_signature
-
- # length of a quarter note (seconds)
- quarter_note_length = 60. / tempo * sig.denominator / 4
- # quarter notes per second
- quarter_notes_per_second = 1 / quarter_note_length
- # ticks per second
- ticks_per_second = resolution * quarter_notes_per_second
-
- # set tempo
- tempo = SetTempoEvent(tick=0)
- tempo.microseconds_per_quarter_note = int(quarter_note_length * 1e6)
-
- # list for events (ticks in absolute timing)
- events = []
-
- # add the notes
- for note in notes:
- onset, pitch, duration, velocity, channel = note
- # add NoteOn
- e_on = NoteOnEvent()
- e_on.tick = int(onset * ticks_per_second)
- e_on.pitch = int(pitch)
- e_on.velocity = int(velocity)
- e_on.channel = int(channel)
- # and NoteOff
- e_off = NoteOffEvent()
- e_off.tick = int((onset + duration) * ticks_per_second)
- e_off.pitch = int(pitch)
- e_off.channel = int(channel)
- events.append(e_on)
- events.append(e_off)
- # sort the events and prepend the tempo and time signature events
- events = sorted(events)
- events.insert(0, sig)
- events.insert(0, tempo)
- # create a track from the events
- return cls(events)
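
To make the tick arithmetic in `from_notes` concrete, a small worked example with the default settings (120 bpm, 4/4, 480 ticks per quarter note):

```py
tempo, resolution = 120, 480
numerator, denominator = 4, 4

quarter_note_length = 60. / tempo * denominator / 4  # 0.5 seconds per quarter note
ticks_per_second = resolution / quarter_note_length  # 960 ticks per second

onset, duration = 0.1, 0.3                           # note from the docstring example
print(int(onset * ticks_per_second))                 # 96  -> NoteOnEvent.tick
print(int((onset + duration) * ticks_per_second))    # 384 -> NoteOffEvent.tick
```
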
-
-
-# File I/O classes
-class MIDIFile(object):
- """
- MIDI File.
-
- Parameters
- ----------
- tracks : list
- List of :class:`MIDITrack` instances.
- resolution : int, optional
- Resolution (i.e. microseconds per quarter note).
- file_format : int, optional
- Format of the MIDI file.
-
- Notes
- -----
- Writing a MIDI file assumes a tempo of 120 beats per minute (bpm) and a 4/4
- time signature and writes all events into a single track (i.e. MIDI format
- 0).
-
- Examples
- --------
- Create a MIDI file from an array with notes. The format of the note array
- is: 'onset time', 'pitch', 'duration', 'velocity', 'channel'. The last
- column can be omitted, assuming channel 0.
-
- >>> notes = np.array([[0, 50, 1, 60], [0.5, 62, 0.5, 90]])
- >>> m = MIDIFile.from_notes(notes)
- >>> m # doctest: +ELLIPSIS
-    <madmom.utils.midi.MIDIFile object at 0x...>
-
- The notes can be accessed as a numpy array in various formats (default is
- seconds):
-
- >>> m.notes()
- array([[ 0. , 50. , 1. , 60. , 0. ],
- [ 0.5, 62. , 0.5, 90. , 0. ]])
- >>> m.notes(unit='ticks')
- array([[ 0., 50., 960., 60., 0.],
- [480., 62., 480., 90., 0.]])
- >>> m.notes(unit='beats')
- array([[ 0., 50., 2., 60., 0.],
- [ 1., 62., 1., 90., 0.]])
-
- >>> m = MIDIFile.from_notes(notes, tempo=60)
- >>> m.notes(unit='ticks')
- array([[ 0., 50., 480., 60., 0.],
- [240., 62., 240., 90., 0.]])
- >>> m.notes(unit='beats')
- array([[ 0. , 50. , 1. , 60. , 0. ],
- [ 0.5, 62. , 0.5, 90. , 0. ]])
-
- >>> m = MIDIFile.from_notes(notes, tempo=60, time_signature=(2, 2))
- >>> m.notes(unit='ticks')
- array([[ 0., 50., 960., 60., 0.],
- [480., 62., 480., 90., 0.]])
- >>> m.notes(unit='beats')
- array([[ 0. , 50. , 1. , 60. , 0. ],
- [ 0.5, 62. , 0.5, 90. , 0. ]])
-
- >>> m = MIDIFile.from_notes(notes, tempo=240, time_signature=(3, 8))
- >>> m.notes(unit='ticks')
- array([[ 0., 50., 960., 60., 0.],
- [480., 62., 480., 90., 0.]])
- >>> m.notes(unit='beats')
- array([[ 0., 50., 4., 60., 0.],
- [ 2., 62., 2., 90., 0.]])
-
- """
-
- def __init__(self, tracks=None, resolution=RESOLUTION, file_format=0):
- # init variables
- if tracks is None:
- self.tracks = []
- elif isinstance(tracks, MIDITrack):
- self.tracks = [tracks]
- elif isinstance(tracks, list):
- # TODO: test if the items of the list are of type MIDITrack
- self.tracks = tracks
- else:
-            raise ValueError('type of `tracks` not supported.')
- self.resolution = resolution # i.e. ticks per quarter note
- # format 0 stores all information in 1 track
- # format 1 has multiple tracks but plays them back simultaneously
- # TODO: format 2 has multiple tracks but plays them back one after
- # another. This type is not supported (yet).
- if file_format > 1:
- raise ValueError('Only MIDI file formats 0 and 1 supported.')
- self.format = file_format
-
- @property
- def ticks_per_quarter_note(self):
- """
- Number of ticks per quarter note.
-
- """
- return self.resolution
-
- def tempi(self, suppress_warnings=False):
- """
- Tempi of the MIDI file.
-
- Returns
- -------
- tempi : numpy array
- Array with tempi (tick, seconds per tick, cumulative time).
-
- """
- if not suppress_warnings:
- import warnings
- warnings.warn('this method will be removed soon, do not rely on '
- 'its output, rather fix issue #192 ;)')
- # create an empty tempo list
- tempo_events = []
- for i, track in enumerate(self.tracks):
- # get a list with tempo events
- track_tempo_events = [e for e in track.events if
- isinstance(e, SetTempoEvent)]
- # tempo events should be only in the first track of a MIDI file
- if track_tempo_events and i > 0:
- raise ValueError('SetTempoEvents should be only in the first '
- 'track of a MIDI file.')
- tempo_events.extend(track_tempo_events)
-
-        # convert to desired format (tick, seconds per tick)
- tempi = [(e.tick, e.microseconds_per_quarter_note /
- (1e6 * self.resolution)) for e in tempo_events]
- # make sure a tempo is set and the first tempo occurs at tick 0
- if not tempi or tempi[0][0] > 0:
- tempi.insert(0, (0, SECONDS_PER_TICK))
- # sort (just to be sure)
- tempi.sort()
- # re-iterate over the list to calculate the cumulative time
- for i, _ in enumerate(tempi):
- if i == 0:
- tempi[i] = (tempi[i][0], tempi[i][1], 0)
- else:
- ticks = tempi[i][0] - tempi[i - 1][0]
- cum_time = tempi[i - 1][2] + ticks * tempi[i - 1][1]
- tempi[i] = (tempi[i][0], tempi[i][1], cum_time)
- # return tempo
-        return np.asarray(tempi, dtype=float)
-
- def time_signatures(self, suppress_warnings=False):
- """
- Time signatures of the MIDI file.
-
- Returns
- -------
- time_signatures : numpy array
- Array with time signatures (tick, numerator, denominator).
-
- """
- if not suppress_warnings:
- import warnings
- warnings.warn('this method will be removed soon, do not rely on '
- 'its output, rather fix issue #192 ;)')
- signatures = None
- for track in self.tracks:
- # get a list with time signature events
- time_signature_events = [e for e in track.events if
- isinstance(e, TimeSignatureEvent)]
- if signatures is None and len(time_signature_events) > 0:
- # convert to desired format
- signatures = [(e.tick, e.numerator, e.denominator)
- for e in time_signature_events]
- elif signatures is not None and len(time_signature_events) > 0:
- # time signature events should be contained only in the first
- # track of a MIDI file, thus raise an error
- raise ValueError('TimeSignatureEvent should be only in the '
- 'first track of a MIDI file.')
- # make sure a time signature is set and the first one occurs at tick 0
- if signatures is None:
-            signatures = [(0, ) + TIME_SIGNATURE]
- if signatures[0][0] > 0:
-            signatures.insert(0, (0, ) + TIME_SIGNATURE)
- # return time signatures
-        return np.asarray(signatures, dtype=float)
-
- def notes(self, unit='s'):
- """
- Notes of the MIDI file.
-
- Parameters
- ----------
- unit : {'s', 'seconds', 'b', 'beats', 't', 'ticks'}
-            Time unit for the notes: seconds ('s'), beats ('b') or ticks ('t').
-
- Returns
- -------
- notes : numpy array
- Array with notes (onset time, pitch, duration, velocity, channel).
-
- """
- # list for all notes
- notes = []
- # dictionary for storing the last onset and velocity for each
- # individual note (i.e. same pitch and channel)
- sounding_notes = {}
-
- # as key for the dict use channel * 128 (max number of pitches) + pitch
- def note_hash(channel, pitch):
- """Generate a note hash."""
- return channel * 128 + pitch
-
- for track in self.tracks:
- # get a list with note events
- note_events = [e for e in track.events if isinstance(e, NoteEvent)]
- # process all events
- tick = 0
- for e in note_events:
- if tick > e.tick:
- raise AssertionError('note events must be sorted!')
- n = note_hash(e.channel, e.pitch)
- is_note_on = isinstance(e, NoteOnEvent)
- is_note_off = isinstance(e, NoteOffEvent)
- # if it's a note on event with a velocity > 0,
- if is_note_on and e.velocity > 0:
- # save the onset time and velocity
- sounding_notes[n] = (e.tick, e.velocity)
- # if it's a note off event or a note on with a velocity of 0,
- elif is_note_off or (is_note_on and e.velocity == 0):
- if n not in sounding_notes:
- import warnings
- warnings.warn("ignoring %s" % e)
- continue
- if sounding_notes[n][0] > e.tick:
- raise AssertionError('note duration must be positive')
- if sounding_notes[n][1] <= 0:
- raise AssertionError('note velocity must be positive')
- # append the note to the list
- notes.append((sounding_notes[n][0], e.pitch,
- e.tick - sounding_notes[n][0],
- sounding_notes[n][1], e.channel))
- # remove hash from dict
- del sounding_notes[n]
- else:
- raise TypeError('unexpected NoteEvent', e)
- tick = e.tick
-
- # sort the notes and convert to numpy array
-        notes = np.asarray(sorted(notes), dtype=float)
-
- # convert onset times and durations from ticks to the requested unit
- # and return the notes
- if unit.lower() in ('t', 'ticks'):
- return notes
- elif unit.lower() in ('s', 'seconds'):
- return self._notes_in_seconds(notes)
- elif unit.lower() in ('b', 'beats'):
- return self._notes_in_beats(notes)
- else:
- raise ValueError("`unit` must be either 'seconds', 's', 'beats', "
- "'b', 'ticks', or 't' not %s." % unit)
-
- def _notes_in_beats(self, notes):
- """
- Converts onsets and offsets of notes from ticks to beats.
-
- Parameters
- ----------
- notes : numpy array or list of tuples
- Notes (onset, pitch, offset, velocity).
-
- Returns
- -------
- notes : numpy array
- Notes with onsets and offsets in beats.
-
- """
- tpq = self.ticks_per_quarter_note
- time_signatures = self.time_signatures(suppress_warnings=True)
-
- # change the second column of time_signatures to beat position of the
- # signature change, the first column is now the tick position, the
- # second column the beat position and the third column the new beat
- # unit after the signature change
- time_signatures[0, 1] = 0
-
- # quarter notes between time signature changes
- qnbtsc = np.diff(time_signatures[:, 0]) / tpq
- # beats between time signature changes
- bbtsc = qnbtsc * (time_signatures[:-1, 2] / 4.0)
- # compute beat position of each time signature change
- time_signatures[1:, 1] = bbtsc.cumsum()
-
- # iterate over all notes
- for note in notes:
- onset, _, offset, _, _ = note
- # get info about last time signature change
- tsc = time_signatures[np.argmax(time_signatures[:, 0] > onset) - 1]
- # adjust onset
- onset_ticks_since_tsc = onset - tsc[0]
- note[0] = tsc[1] + (onset_ticks_since_tsc / tpq) * (tsc[2] / 4.)
- # adjust offsets
- offset_ticks_since_tsc = offset - tsc[0]
- note[2] = tsc[1] + (offset_ticks_since_tsc / tpq) * (tsc[2] / 4.)
- # return notes
- return notes
-
- def _notes_in_seconds(self, notes):
- """
- Converts onsets and offsets of notes from ticks to seconds.
-
- Parameters
- ----------
- notes : numpy array or list of tuples
- Notes (onset, pitch, offset, velocity).
-
- Returns
- -------
- notes : numpy array
- Notes with onset and offset times in seconds.
-
- """
- # cache tempo
- tempi = self.tempi(suppress_warnings=True)
- # iterate over all notes
- for note in notes:
- onset, _, offset, _, _ = note
- # get last tempo for the onset and offset
- t_on = tempi[np.argmax(tempi[:, 0] > onset) - 1]
- t_off = tempi[np.argmax(tempi[:, 0] > offset) - 1]
- # adjust the note onset and offset
- note[0] = (onset - t_on[0]) * t_on[1] + t_on[2]
- note[2] = (offset - t_off[0]) * t_off[1] + t_off[2]
- # return notes
- return notes
-
- # methods for writing MIDI stuff
- @property
- def data_stream(self):
- """
- MIDI data stream representation of the MIDI file.
-
- """
- # prepare data
- data = bytearray()
- # generate a MIDI header
- data.extend(b'MThd')
- data.extend(struct.pack(">LHHH", 6, self.format, len(self.tracks),
- self.resolution))
- # append the tracks
- for track in self.tracks:
- data.extend(track.data_stream)
- # return the data
- return data
-
- def write(self, midi_file):
- """
- Write a MIDI file.
-
- Parameters
- ----------
- midi_file : str
- The MIDI file name.
-
- """
- # if we get a filename, open the file
- if not hasattr(midi_file, 'write'):
- midi_file = open(midi_file, 'wb')
- # write the MIDI stream
- midi_file.write(self.data_stream)
- # close the file
- midi_file.close()
-
- @classmethod
- def from_file(cls, midi_file):
- """
- Create a MIDI file instance from a .mid file.
-
- Parameters
- ----------
- midi_file : str
- Name of the .mid file to load.
-
- Returns
- -------
- :class:`MIDIFile` instance
- :class:`MIDIFile` instance
-
- """
- tracks = []
- resolution = None
- midi_format = None
- with open(midi_file, 'rb') as midi_file:
- # read in file header
- # first four bytes are MIDI header
- chunk = midi_file.read(4)
- if chunk != b'MThd':
-                raise TypeError("Bad header in MIDI file: %s" % chunk)
- # next four bytes are header size
- # next two bytes specify the format version
- # next two bytes specify the number of tracks
- # next two bytes specify the resolution/PPQ/Parts Per Quarter
- # (in other words, how many ticks per quarter note)
- data = struct.unpack(">LHHH", midi_file.read(10))
- header_size = data[0]
- midi_format = data[1]
- num_tracks = data[2]
- resolution = data[3]
- # if the top bit of the resolution word is 0, the following 15 bits
- # describe the time division in ticks per beat
- if resolution & 0x8000 == 0:
- resolution = resolution
- # otherwise the following 15 bits describe the time division in
- # frames per second
- else:
- # from http://www.sonicspot.com/guide/midifiles.html:
- # Frames per second is defined by breaking the remaining 15
- # bytes into two values. The top 7 bits (bit mask 0x7F00)
- # define a value for the number of SMPTE frames and can be
- # 24, 25, 29 (for 29.97 fps) or 30. The remaining byte
- # (bit mask 0x00FF) defines how many clock ticks or track delta
- # positions there are per frame. So a time division example of
- # 0x9978 could be broken down into it's three parts: the top
- # bit is one, so it is in SMPTE frames per second format, the
- # following 7 bits have a value of 25 (0x19) and the bottom
- # byte has a value of 120 (0x78). This means the example plays
-                # at 25 frames per second SMPTE time and has 120 ticks per
- # frame.
- raise NotImplementedError("SMPTE resolution not implemented.")
- # skip the remaining part of the header
- if header_size > HEADER_SIZE:
- midi_file.read(header_size - HEADER_SIZE)
- # read in all tracks
- for _ in range(num_tracks):
- # read in one track and append it to the tracks list
- track = MIDITrack.from_stream(midi_file)
- tracks.append(track)
- if resolution is None or midi_format is None:
- raise IOError('unable to read MIDI file %s.' % midi_file)
- # return a newly created object
- return cls(tracks=tracks, resolution=resolution,
- file_format=midi_format)
-
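
The long comment in `from_file` above about the division word boils down to a few bit operations; a sketch decoding the `0x9978` example quoted from the guide:

```py
division = 0x9978

if division & 0x8000 == 0:
    # top bit clear: plain ticks (PPQ) per quarter note
    print('resolution: %d ticks per quarter note' % division)
else:
    # top bit set: SMPTE format, 7 bits of frames per second plus one byte of ticks per frame
    frames_per_second = (division & 0x7F00) >> 8  # 0x19 -> 25
    ticks_per_frame = division & 0x00FF           # 0x78 -> 120
    print('resolution: %d fps SMPTE, %d ticks per frame'
          % (frames_per_second, ticks_per_frame))
```
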
- @classmethod
- def from_notes(cls, notes, tempo=TEMPO, time_signature=TIME_SIGNATURE,
- resolution=RESOLUTION):
- """
- Create a MIDIFile from the given notes.
-
- Parameters
- ----------
- notes : numpy array
- Array with the notes, one per row. The columns must be:
- (onset time, pitch, duration, velocity, [channel]).
- tempo : float, optional
- Tempo of the MIDI track, given in beats per minute (bpm).
- time_signature : tuple, optional
- Time signature of the track, e.g. (4, 4) for 4/4.
- resolution : int
- Resolution (i.e. ticks per quarter note) of the MIDI track.
-
- Returns
- -------
- :class:`MIDIFile` instance
- :class:`MIDIFile` instance with all notes collected in one track.
-
- Notes
- -----
- All note events (including the generated tempo and time signature
- events) are written into a single track (i.e. MIDI file format 0).
-
- """
- # create a new track from the notes and then a MIDIFile instance
- return cls(MIDITrack.from_notes(notes, tempo, time_signature,
- resolution))
-
- @staticmethod
- def add_arguments(parser, length=None, velocity=None, channel=None):
- """
- Add MIDI related arguments to an existing parser object.
-
- Parameters
- ----------
- parser : argparse parser instance
- Existing argparse parser object.
- length : float, optional
- Default length of the notes [seconds].
- velocity : int, optional
- Default velocity of the notes.
- channel : int, optional
- Default channel of the notes.
-
- Returns
- -------
- argparse argument group
- MIDI argument parser group object.
-
- """
- # add MIDI related options to the existing parser
- g = parser.add_argument_group('MIDI arguments')
- g.add_argument('--midi', action='store_true', help='save as MIDI')
- if length is not None:
- g.add_argument('--note_length', action='store', type=float,
- default=length,
- help='set the note length [default=%(default).2f]')
- if velocity is not None:
- g.add_argument('--note_velocity', action='store', type=int,
- default=velocity,
- help='set the note velocity [default=%(default)i]')
- if channel is not None:
- g.add_argument('--note_channel', action='store', type=int,
- default=channel,
- help='set the note channel [default=%(default)i]')
- # return the argument group so it can be modified if needed
- return g
-
-
-def process_notes(data, output=None):
- """
-    This is a simple processing function. It either loads the notes from a MIDI
-    file or writes the notes to a file.
-
-    The behaviour depends on the `output` argument: if 'None' is given, the
-    notes are read; otherwise, the notes are written to a file.
-
- Parameters
- ----------
- data : str or numpy array
- MIDI file to be loaded (if `output` is 'None') / notes to be written.
- output : str, optional
- Output file name. If set, the notes given by `data` are written.
-
- Returns
- -------
- notes : numpy array
- Notes read/written.
-
- """
- if output is None:
- # load the notes
- return MIDIFile.from_file(data).notes()
- MIDIFile.from_notes(data).write(output)
- return data
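
A minimal round trip with the helper above (the file name is just a placeholder, and the import assumes the original `madmom.utils.midi` package layout):

```py
import numpy as np
from madmom.utils.midi import process_notes  # deprecated module shown above

notes = np.array([[0, 50, 1, 60], [0.5, 62, 0.5, 90]])
process_notes(notes, output='example.mid')  # write the notes to a MIDI file
print(process_notes('example.mid'))         # read them back as (onset, pitch, duration, velocity, channel)
```
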
diff --git a/spaces/MathysL/AutoGPT4/tests/unit/test_browse_scrape_text.py b/spaces/MathysL/AutoGPT4/tests/unit/test_browse_scrape_text.py
deleted file mode 100644
index fea5ebfc05d466c7cb5711b5ac10e2ea102ddc45..0000000000000000000000000000000000000000
--- a/spaces/MathysL/AutoGPT4/tests/unit/test_browse_scrape_text.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Generated by CodiumAI
-
-import requests
-
-from autogpt.commands.web_requests import scrape_text
-
-"""
-Code Analysis
-
-Objective:
-The objective of the "scrape_text" function is to scrape the text content from
-a given URL and return it as a string, after removing any unwanted HTML tags and scripts.
-
-Inputs:
-- url: a string representing the URL of the webpage to be scraped.
-
-Flow:
-1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
-2. Check if the response contains an HTTP error. If it does, return an error message.
-3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags.
-4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup.
-5. Split the text into lines and then into chunks, removing any extra whitespace.
-6. Join the chunks into a single string with newline characters between them.
-7. Return the cleaned text.
-
-Outputs:
-- A string representing the cleaned text content of the webpage.
-
-Additional aspects:
-- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively.
-- The function removes script and style tags from the HTML to avoid including unwanted content in the text output.
-- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text.
-"""
-
-
-class TestScrapeText:
- # Tests that scrape_text() returns the expected text when given a valid URL.
- def test_scrape_text_with_valid_url(self, mocker):
- # Mock the requests.get() method to return a response with expected text
- expected_text = "This is some sample text"
- mock_response = mocker.Mock()
- mock_response.status_code = 200
- mock_response.text = f"
{expected_text}
"
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with a valid URL and assert that it returns the expected text
- url = "http://www.example.com"
- assert scrape_text(url) == expected_text
-
- # Tests that the function returns an error message when an invalid or unreachable url is provided.
- def test_invalid_url(self, mocker):
- # Mock the requests.get() method to raise an exception
- mocker.patch(
- "requests.Session.get", side_effect=requests.exceptions.RequestException
- )
-
- # Call the function with an invalid URL and assert that it returns an error message
- url = "http://www.invalidurl.com"
- error_message = scrape_text(url)
- assert "Error:" in error_message
-
- # Tests that the function returns an empty string when the html page contains no text to be scraped.
- def test_no_text(self, mocker):
- # Mock the requests.get() method to return a response with no text
- mock_response = mocker.Mock()
- mock_response.status_code = 200
- mock_response.text = ""
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with a valid URL and assert that it returns an empty string
- url = "http://www.example.com"
- assert scrape_text(url) == ""
-
- # Tests that the function returns an error message when the response status code is an http error (>=400).
- def test_http_error(self, mocker):
- # Mock the requests.get() method to return a response with a 404 status code
- mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404))
-
- # Call the function with a URL
- result = scrape_text("https://www.example.com")
-
- # Check that the function returns an error message
- assert result == "Error: HTTP 404 error"
-
- # Tests that scrape_text() properly handles HTML tags.
- def test_scrape_text_with_html_tags(self, mocker):
- # Create a mock response object with HTML containing tags
- html = "
This is bold text.
"
- mock_response = mocker.Mock()
- mock_response.status_code = 200
- mock_response.text = html
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with a URL
- result = scrape_text("https://www.example.com")
-
- # Check that the function properly handles HTML tags
- assert result == "This is bold text."
diff --git a/spaces/MehdiAmirate/Botv2/Dockerfile b/spaces/MehdiAmirate/Botv2/Dockerfile
deleted file mode 100644
index bc644e32df0be9e7711cf8993f63dcf7a9f48558..0000000000000000000000000000000000000000
--- a/spaces/MehdiAmirate/Botv2/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM python:3.9
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-RUN python3 -m pip install --no-cache-dir --upgrade pip
-RUN python3 -m pip install --no-cache-dir --upgrade -r /code/requirements.txt
-RUN pip install langchain
-
-RUN mkdir /.cache
-RUN chmod 777 /.cache
-RUN mkdir .chroma
-RUN chmod 777 .chroma
-
-COPY . .
-
-ENV BOKEH_ALLOW_WS_ORIGIN=mehdiamirate-botv2.hf.space
-
-CMD ["panel", "serve", "/code/Botv2.ipynb", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "MehdiAmirate-Botv2.hf.space", "--allow-websocket-origin", "0.0.0.0:7860"]
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/merge_cells.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/merge_cells.py
deleted file mode 100644
index 48ca8cc0a8aca8432835bd760c0403a3c35b34cf..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/merge_cells.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from abc import abstractmethod
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ..cnn import ConvModule
-
-
-class BaseMergeCell(nn.Module):
- """The basic class for cells used in NAS-FPN and NAS-FCOS.
-
- BaseMergeCell takes 2 inputs. After applying convolution
- on them, they are resized to the target size. Then,
- they go through binary_op, which depends on the type of cell.
- If with_out_conv is True, the result of output will go through
- another convolution layer.
-
- Args:
-        fused_channels (int): number of input channels in the out_conv layer.
- out_channels (int): number of output channels in out_conv layer.
- with_out_conv (bool): Whether to use out_conv layer
- out_conv_cfg (dict): Config dict for convolution layer, which should
- contain "groups", "kernel_size", "padding", "bias" to build
- out_conv layer.
- out_norm_cfg (dict): Config dict for normalization layer in out_conv.
- out_conv_order (tuple): The order of conv/norm/activation layers in
- out_conv.
- with_input1_conv (bool): Whether to use convolution on input1.
- with_input2_conv (bool): Whether to use convolution on input2.
- input_conv_cfg (dict): Config dict for building input1_conv layer and
- input2_conv layer, which is expected to contain the type of
- convolution.
- Default: None, which means using conv2d.
- input_norm_cfg (dict): Config dict for normalization layer in
- input1_conv and input2_conv layer. Default: None.
- upsample_mode (str): Interpolation method used to resize the output
- of input1_conv and input2_conv to target size. Currently, we
- support ['nearest', 'bilinear']. Default: 'nearest'.
- """
-
- def __init__(self,
- fused_channels=256,
- out_channels=256,
- with_out_conv=True,
- out_conv_cfg=dict(
- groups=1, kernel_size=3, padding=1, bias=True),
- out_norm_cfg=None,
- out_conv_order=('act', 'conv', 'norm'),
- with_input1_conv=False,
- with_input2_conv=False,
- input_conv_cfg=None,
- input_norm_cfg=None,
- upsample_mode='nearest'):
- super(BaseMergeCell, self).__init__()
- assert upsample_mode in ['nearest', 'bilinear']
- self.with_out_conv = with_out_conv
- self.with_input1_conv = with_input1_conv
- self.with_input2_conv = with_input2_conv
- self.upsample_mode = upsample_mode
-
- if self.with_out_conv:
- self.out_conv = ConvModule(
- fused_channels,
- out_channels,
- **out_conv_cfg,
- norm_cfg=out_norm_cfg,
- order=out_conv_order)
-
- self.input1_conv = self._build_input_conv(
- out_channels, input_conv_cfg,
- input_norm_cfg) if with_input1_conv else nn.Sequential()
- self.input2_conv = self._build_input_conv(
- out_channels, input_conv_cfg,
- input_norm_cfg) if with_input2_conv else nn.Sequential()
-
- def _build_input_conv(self, channel, conv_cfg, norm_cfg):
- return ConvModule(
- channel,
- channel,
- 3,
- padding=1,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- bias=True)
-
- @abstractmethod
- def _binary_op(self, x1, x2):
- pass
-
- def _resize(self, x, size):
- if x.shape[-2:] == size:
- return x
- elif x.shape[-2:] < size:
- return F.interpolate(x, size=size, mode=self.upsample_mode)
- else:
- assert x.shape[-2] % size[-2] == 0 and x.shape[-1] % size[-1] == 0
- kernel_size = x.shape[-1] // size[-1]
- x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size)
- return x
-
- def forward(self, x1, x2, out_size=None):
- assert x1.shape[:2] == x2.shape[:2]
- assert out_size is None or len(out_size) == 2
- if out_size is None: # resize to larger one
- out_size = max(x1.size()[2:], x2.size()[2:])
-
- x1 = self.input1_conv(x1)
- x2 = self.input2_conv(x2)
-
- x1 = self._resize(x1, out_size)
- x2 = self._resize(x2, out_size)
-
- x = self._binary_op(x1, x2)
- if self.with_out_conv:
- x = self.out_conv(x)
- return x
-
-
-class SumCell(BaseMergeCell):
-
- def __init__(self, in_channels, out_channels, **kwargs):
- super(SumCell, self).__init__(in_channels, out_channels, **kwargs)
-
- def _binary_op(self, x1, x2):
- return x1 + x2
-
-
-class ConcatCell(BaseMergeCell):
-
- def __init__(self, in_channels, out_channels, **kwargs):
- super(ConcatCell, self).__init__(in_channels * 2, out_channels,
- **kwargs)
-
- def _binary_op(self, x1, x2):
- ret = torch.cat([x1, x2], dim=1)
- return ret
-
-
-class GlobalPoolingCell(BaseMergeCell):
-
- def __init__(self, in_channels=None, out_channels=None, **kwargs):
- super().__init__(in_channels, out_channels, **kwargs)
- self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
-
- def _binary_op(self, x1, x2):
- x2_att = self.global_pool(x2).sigmoid()
- return x2 + x2_att * x1
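
For context, a small usage sketch of the merge cells defined above. It assumes a full mmcv build where these classes are importable from `mmcv.ops.merge_cells` (the copy in this Space is vendored under `annotator/uniformer/mmcv`, so the import path is an assumption):

```py
import torch
from mmcv.ops.merge_cells import ConcatCell, GlobalPoolingCell, SumCell

x_small = torch.randn(2, 256, 16, 16)   # lower-resolution feature map
x_large = torch.randn(2, 256, 32, 32)   # higher-resolution feature map

sum_cell = SumCell(256, 256)
cat_cell = ConcatCell(256, 256)
gp_cell = GlobalPoolingCell(with_out_conv=False)

# With out_size unset, both inputs are resized to the larger spatial size.
print(sum_cell(x_small, x_large).shape)                     # torch.Size([2, 256, 32, 32])
print(cat_cell(x_small, x_large).shape)                     # torch.Size([2, 256, 32, 32])
# GlobalPoolingCell gates x_small with a pooled attention map from x_large.
print(gp_cell(x_small, x_large, out_size=(16, 16)).shape)   # torch.Size([2, 256, 16, 16])
```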
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/pascal_context.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/pascal_context.py
deleted file mode 100644
index 541a63c66a13fb16fd52921e755715ad8d078fdd..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/pascal_context.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import os.path as osp
-
-from .builder import DATASETS
-from .custom import CustomDataset
-
-
-@DATASETS.register_module()
-class PascalContextDataset(CustomDataset):
- """PascalContext dataset.
-
- In segmentation map annotation for PascalContext, 0 stands for background,
- which is included in 60 categories. ``reduce_zero_label`` is fixed to
- False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
- fixed to '.png'.
-
- Args:
- split (str): Split txt file for PascalContext.
- """
-
- CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench',
- 'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus',
- 'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth',
- 'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence',
- 'floor', 'flower', 'food', 'grass', 'ground', 'horse',
- 'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person',
- 'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep',
- 'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table',
- 'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water',
- 'window', 'wood')
-
- PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
- [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
- [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
- [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
- [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
- [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
- [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
- [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
- [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
- [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
- [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
- [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
- [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
- [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
- [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]
-
- def __init__(self, split, **kwargs):
- super(PascalContextDataset, self).__init__(
- img_suffix='.jpg',
- seg_map_suffix='.png',
- split=split,
- reduce_zero_label=False,
- **kwargs)
- assert osp.exists(self.img_dir) and self.split is not None
-
-
-@DATASETS.register_module()
-class PascalContextDataset59(CustomDataset):
- """PascalContext dataset.
-
-    In segmentation map annotation for PascalContext59, 0 stands for
-    background, which is excluded from the 59 categories, so
-    ``reduce_zero_label`` is fixed to True. The ``img_suffix`` is fixed to
-    '.jpg' and ``seg_map_suffix`` is fixed to '.png'.
-
- Args:
- split (str): Split txt file for PascalContext.
- """
-
- CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle',
- 'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet',
- 'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow',
- 'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower',
- 'food', 'grass', 'ground', 'horse', 'keyboard', 'light',
- 'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform',
- 'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk',
- 'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train',
- 'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood')
-
- PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
- [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230],
- [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61],
- [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140],
- [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200],
- [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71],
- [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92],
- [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6],
- [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8],
- [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8],
- [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255],
- [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140],
- [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0],
- [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0],
- [0, 235, 255], [0, 173, 255], [31, 0, 255]]
-
- def __init__(self, split, **kwargs):
- super(PascalContextDataset59, self).__init__(
- img_suffix='.jpg',
- seg_map_suffix='.png',
- split=split,
- reduce_zero_label=True,
- **kwargs)
- assert osp.exists(self.img_dir) and self.split is not None
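
For reference, a hypothetical mmseg-style config fragment showing how the two registered datasets above differ in practice; the paths, split files and empty pipeline are placeholders, not values from this repo:

```py
# 60-class variant: background (label 0) is kept, reduce_zero_label=False.
pascal_context_60 = dict(
    type='PascalContextDataset',
    data_root='data/VOCdevkit/VOC2010',                # placeholder path
    img_dir='JPEGImages',
    ann_dir='SegmentationClassContext',
    split='ImageSets/SegmentationContext/train.txt',   # placeholder split file
    pipeline=[],                                       # fill in load/resize/normalize steps
)

# 59-class variant: background is dropped via reduce_zero_label=True.
pascal_context_59 = dict(
    type='PascalContextDataset59',
    data_root='data/VOCdevkit/VOC2010',
    img_dir='JPEGImages',
    ann_dir='SegmentationClassContext',
    split='ImageSets/SegmentationContext/val.txt',
    pipeline=[],
)
```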
diff --git a/spaces/MoonQiu/LongerCrafter/lvdm/modules/networks/openaimodel3d.py b/spaces/MoonQiu/LongerCrafter/lvdm/modules/networks/openaimodel3d.py
deleted file mode 100644
index 276f336d0217a87f8f31101a879e0b049dfa0dbb..0000000000000000000000000000000000000000
--- a/spaces/MoonQiu/LongerCrafter/lvdm/modules/networks/openaimodel3d.py
+++ /dev/null
@@ -1,581 +0,0 @@
-from functools import partial
-from abc import abstractmethod
-import torch
-import torch.nn as nn
-from einops import rearrange
-import torch.nn.functional as F
-from lvdm.models.utils_diffusion import timestep_embedding
-from lvdm.common import checkpoint
-from lvdm.basics import (
- zero_module,
- conv_nd,
- linear,
- avg_pool_nd,
- normalization
-)
-from lvdm.modules.attention import SpatialTransformer, TemporalTransformer
-
-
-class TimestepBlock(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
- @abstractmethod
- def forward(self, x, emb):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb, context=None, batch_size=None):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb, batch_size)
- elif isinstance(layer, SpatialTransformer):
- x = layer(x, context)
- elif isinstance(layer, TemporalTransformer):
- x = rearrange(x, '(b f) c h w -> b c f h w', b=batch_size)
- x = layer(x, context)
- x = rearrange(x, 'b c f h w -> (b f) c h w')
- else:
- x = layer(x,)
- return x
-
-
-class Downsample(nn.Module):
- """
- A downsampling layer with an optional convolution.
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- stride = 2 if dims != 3 else (1, 2, 2)
- if use_conv:
- self.op = conv_nd(
- dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
- )
- else:
- assert self.channels == self.out_channels
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- return self.op(x)
-
-
-class Upsample(nn.Module):
- """
- An upsampling layer with an optional convolution.
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- if use_conv:
- self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.dims == 3:
- x = F.interpolate(x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode='nearest')
- else:
- x = F.interpolate(x, scale_factor=2, mode='nearest')
- if self.use_conv:
- x = self.conv(x)
- return x
-
-
-class ResBlock(TimestepBlock):
- """
- A residual block that can optionally change the number of channels.
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels.
- :param use_conv: if True and out_channels is specified, use a spatial
- convolution instead of a smaller 1x1 convolution to change the
- channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- """
-
- def __init__(
- self,
- channels,
- emb_channels,
- dropout,
- out_channels=None,
- use_scale_shift_norm=False,
- dims=2,
- use_checkpoint=False,
- use_conv=False,
- up=False,
- down=False,
- use_temporal_conv=False,
- tempspatial_aware=False
- ):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_checkpoint = use_checkpoint
- self.use_scale_shift_norm = use_scale_shift_norm
- self.use_temporal_conv = use_temporal_conv
-
- self.in_layers = nn.Sequential(
- normalization(channels),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- self.updown = up or down
-
- if up:
- self.h_upd = Upsample(channels, False, dims)
- self.x_upd = Upsample(channels, False, dims)
- elif down:
- self.h_upd = Downsample(channels, False, dims)
- self.x_upd = Downsample(channels, False, dims)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- nn.Linear(
- emb_channels,
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
- ),
- )
- self.out_layers = nn.Sequential(
- normalization(self.out_channels),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(nn.Conv2d(self.out_channels, self.out_channels, 3, padding=1)),
- )
-
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- elif use_conv:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1)
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
- if self.use_temporal_conv:
- self.temopral_conv = TemporalConvBlock(
- self.out_channels,
- self.out_channels,
- dropout=0.1,
- spatial_aware=tempspatial_aware
- )
-
- def forward(self, x, emb, batch_size=None):
- """
- Apply the block to a Tensor, conditioned on a timestep embedding.
- :param x: an [N x C x ...] Tensor of features.
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
- :return: an [N x C x ...] Tensor of outputs.
- """
- input_tuple = (x, emb,)
- if batch_size:
- forward_batchsize = partial(self._forward, batch_size=batch_size)
- return checkpoint(forward_batchsize, input_tuple, self.parameters(), self.use_checkpoint)
- return checkpoint(self._forward, input_tuple, self.parameters(), self.use_checkpoint)
-
- def _forward(self, x, emb, batch_size=None,):
- if self.updown:
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
- h = in_rest(x)
- h = self.h_upd(h)
- x = self.x_upd(x)
- h = in_conv(h)
- else:
- h = self.in_layers(x)
- emb_out = self.emb_layers(emb).type(h.dtype)
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
- if self.use_scale_shift_norm:
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = torch.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
- else:
- h = h + emb_out
- h = self.out_layers(h)
- h = self.skip_connection(x) + h
-
- if self.use_temporal_conv and batch_size:
- h = rearrange(h, '(b t) c h w -> b c t h w', b=batch_size)
- h = self.temopral_conv(h)
- h = rearrange(h, 'b c t h w -> (b t) c h w')
- return h
-
-
-class TemporalConvBlock(nn.Module):
- """
- Adapted from modelscope: https://github.com/modelscope/modelscope/blob/master/modelscope/models/multi_modal/video_synthesis/unet_sd.py
- """
-
- def __init__(self, in_channels, out_channels=None, dropout=0.0, spatial_aware=False):
- super(TemporalConvBlock, self).__init__()
- if out_channels is None:
- out_channels = in_channels
- self.in_channels = in_channels
- self.out_channels = out_channels
- kernel_shape = (3, 1, 1) if not spatial_aware else (3, 3, 3)
- padding_shape = (1, 0, 0) if not spatial_aware else (1, 1, 1)
-
- # conv layers
- self.conv1 = nn.Sequential(
- nn.GroupNorm(32, in_channels), nn.SiLU(),
- nn.Conv3d(in_channels, out_channels, kernel_shape, padding=padding_shape))
- self.conv2 = nn.Sequential(
- nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout),
- nn.Conv3d(out_channels, in_channels, kernel_shape, padding=padding_shape))
- self.conv3 = nn.Sequential(
- nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout),
- nn.Conv3d(out_channels, in_channels, (3, 1, 1), padding=(1, 0, 0)))
- self.conv4 = nn.Sequential(
- nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout),
- nn.Conv3d(out_channels, in_channels, (3, 1, 1), padding=(1, 0, 0)))
-
-        # zero out the last layer params, so the conv block is an identity mapping
- nn.init.zeros_(self.conv4[-1].weight)
- nn.init.zeros_(self.conv4[-1].bias)
-
- def forward(self, x):
- identity = x
- x = self.conv1(x)
- x = self.conv2(x)
- x = self.conv3(x)
- x = self.conv4(x)
-
- return x + identity
-
-
-class UNetModel(nn.Module):
- """
- The full UNet model with attention and timestep embedding.
-    :param in_channels: channels in the input Tensor.
- :param model_channels: base channel count for the model.
- :param out_channels: channels in the output Tensor.
- :param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which
- attention will take place. May be a set, list, or tuple.
- For example, if this contains 4, then at 4x downsampling, attention
- will be used.
- :param dropout: the dropout probability.
- :param channel_mult: channel multiplier for each level of the UNet.
- :param conv_resample: if True, use learned convolutions for upsampling and
- downsampling.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param num_classes: if specified (as an int), then this model will be
- class-conditional with `num_classes` classes.
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
- :param num_heads: the number of attention heads in each attention layer.
-    :param num_head_channels: if specified, ignore num_heads and instead use
- a fixed channel width per attention head.
- :param num_heads_upsample: works with num_heads to set a different number
- of heads for upsampling. Deprecated.
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
- :param resblock_updown: use residual blocks for up/downsampling.
- """
-
- def __init__(self,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0.0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- context_dim=None,
- use_scale_shift_norm=False,
- resblock_updown=False,
- num_heads=-1,
- num_head_channels=-1,
- transformer_depth=1,
- use_linear=False,
- use_checkpoint=False,
- temporal_conv=False,
- tempspatial_aware=False,
- temporal_attention=True,
- temporal_selfatt_only=True,
- use_relative_position=True,
- use_causal_attention=False,
- temporal_length=None,
- use_fp16=False,
- addition_attention=False,
- use_image_attention=False,
- temporal_transformer_depth=1,
- fps_cond=False,
- ):
- super(UNetModel, self).__init__()
- if num_heads == -1:
- assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
- if num_head_channels == -1:
- assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
-
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.temporal_attention = temporal_attention
- time_embed_dim = model_channels * 4
- self.use_checkpoint = use_checkpoint
- self.dtype = torch.float16 if use_fp16 else torch.float32
- self.addition_attention=addition_attention
- self.use_image_attention = use_image_attention
- self.fps_cond=fps_cond
-
-
-
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
- if self.fps_cond:
- self.fps_embedding = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(conv_nd(dims, in_channels, model_channels, 3, padding=1))
- ]
- )
- if self.addition_attention:
- self.init_attn=TimestepEmbedSequential(
- TemporalTransformer(
- model_channels,
- n_heads=8,
- d_head=num_head_channels,
- depth=transformer_depth,
- context_dim=context_dim,
- use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only,
- causal_attention=use_causal_attention, relative_position=use_relative_position,
- temporal_length=temporal_length))
-
- input_block_chans = [model_channels]
- ch = model_channels
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(ch, time_embed_dim, dropout,
- out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
- use_temporal_conv=temporal_conv
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- layers.append(
- SpatialTransformer(ch, num_heads, dim_head,
- depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, disable_self_attn=False,
- img_cross_attention=self.use_image_attention
- )
- )
- if self.temporal_attention:
- layers.append(
- TemporalTransformer(ch, num_heads, dim_head,
- depth=temporal_transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only,
- causal_attention=use_causal_attention, relative_position=use_relative_position,
- temporal_length=temporal_length
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(ch, time_embed_dim, dropout,
- out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True
- )
- if resblock_updown
- else Downsample(ch, conv_resample, dims=dims, out_channels=out_ch)
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
-
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- layers = [
- ResBlock(ch, time_embed_dim, dropout,
- dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
- use_temporal_conv=temporal_conv
- ),
- SpatialTransformer(ch, num_heads, dim_head,
- depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, disable_self_attn=False,
- img_cross_attention=self.use_image_attention
- )
- ]
- if self.temporal_attention:
- layers.append(
- TemporalTransformer(ch, num_heads, dim_head,
- depth=temporal_transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only,
- causal_attention=use_causal_attention, relative_position=use_relative_position,
- temporal_length=temporal_length
- )
- )
- layers.append(
- ResBlock(ch, time_embed_dim, dropout,
- dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
- use_temporal_conv=temporal_conv
- )
- )
- self.middle_block = TimestepEmbedSequential(*layers)
-
- self.output_blocks = nn.ModuleList([])
- for level, mult in list(enumerate(channel_mult))[::-1]:
- for i in range(num_res_blocks + 1):
- ich = input_block_chans.pop()
- layers = [
- ResBlock(ch + ich, time_embed_dim, dropout,
- out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
- use_temporal_conv=temporal_conv
- )
- ]
- ch = model_channels * mult
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- layers.append(
- SpatialTransformer(ch, num_heads, dim_head,
- depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, disable_self_attn=False,
- img_cross_attention=self.use_image_attention
- )
- )
- if self.temporal_attention:
- layers.append(
- TemporalTransformer(ch, num_heads, dim_head,
- depth=temporal_transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only,
- causal_attention=use_causal_attention, relative_position=use_relative_position,
- temporal_length=temporal_length
- )
- )
- if level and i == num_res_blocks:
- out_ch = ch
- layers.append(
- ResBlock(ch, time_embed_dim, dropout,
- out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- up=True
- )
- if resblock_updown
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
- )
- ds //= 2
- self.output_blocks.append(TimestepEmbedSequential(*layers))
-
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
- )
-
- def forward(self, x, timesteps, context=None, features_adapter=None, fps=16, **kwargs):
- t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
- emb = self.time_embed(t_emb)
-
- if self.fps_cond:
- if type(fps) == int:
- fps = torch.full_like(timesteps, fps)
- fps_emb = timestep_embedding(fps,self.model_channels, repeat_only=False)
- emb += self.fps_embedding(fps_emb)
-
- b,_,t,_,_ = x.shape
- ## repeat t times for context [(b t) 77 768] & time embedding
- if len(context.shape) < 4:
- context = context.repeat_interleave(repeats=t, dim=0)
- else:
- context = context.view(-1, context.shape[2], context.shape[3])
- # context = context.repeat_interleave(repeats=t, dim=0)
- emb = emb.repeat_interleave(repeats=t, dim=0)
-
- ## always in shape (b t) c h w, except for temporal layer
- x = rearrange(x, 'b c t h w -> (b t) c h w')
-
- h = x.type(self.dtype)
- adapter_idx = 0
- hs = []
- for id, module in enumerate(self.input_blocks):
- h = module(h, emb, context=context, batch_size=b)
- if id ==0 and self.addition_attention:
- h = self.init_attn(h, emb, context=context, batch_size=b)
- ## plug-in adapter features
- if ((id+1)%3 == 0) and features_adapter is not None:
- h = h + features_adapter[adapter_idx]
- adapter_idx += 1
- hs.append(h)
- if features_adapter is not None:
- assert len(features_adapter)==adapter_idx, 'Wrong features_adapter'
-
- h = self.middle_block(h, emb, context=context, batch_size=b)
- for module in self.output_blocks:
- h = torch.cat([h, hs.pop()], dim=1)
- h = module(h, emb, context=context, batch_size=b)
- h = h.type(x.dtype)
- y = self.out(h)
-
- # reshape back to (b c t h w)
- y = rearrange(y, '(b t) c h w -> b c t h w', b=b)
- return y
-
\ No newline at end of file
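
As a side note, the layout juggling in `TimestepEmbedSequential`, `ResBlock._forward` and `UNetModel.forward` above all follows one convention: spatial layers see frames as extra batch entries, temporal layers see an explicit time axis. A tiny, self-contained sketch of that round trip (illustration only):

```py
import torch
from einops import rearrange

b, c, t, h, w = 2, 4, 8, 32, 32
video = torch.randn(b, c, t, h, w)

# Spatial convs / spatial attention operate on (b*t, c, h, w): each frame is a batch item.
frames = rearrange(video, 'b c t h w -> (b t) c h w')

# Temporal convs / temporal attention fold the frames back into a time axis.
video_back = rearrange(frames, '(b t) c h w -> b c t h w', b=b)

assert torch.equal(video, video_back)
```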
diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/psenet/psenet_resnet50_fpnf_600e_ctw1500.py b/spaces/Mountchicken/MAERec-Gradio/configs/textdet/psenet/psenet_resnet50_fpnf_600e_ctw1500.py
deleted file mode 100644
index a6d97b99bbcb12008433851356e67b6dcd779b15..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/psenet/psenet_resnet50_fpnf_600e_ctw1500.py
+++ /dev/null
@@ -1,52 +0,0 @@
-_base_ = [
- '_base_psenet_resnet50_fpnf.py',
- '../_base_/datasets/ctw1500.py',
- '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_adam_600e.py',
-]
-
-# optimizer
-optim_wrapper = dict(optimizer=dict(lr=1e-4))
-train_cfg = dict(val_interval=40)
-param_scheduler = [
- dict(type='MultiStepLR', milestones=[200, 400], end=600),
-]
-
-# dataset settings
-ctw1500_textdet_train = _base_.ctw1500_textdet_train
-ctw1500_textdet_test = _base_.ctw1500_textdet_test
-
-test_pipeline_ctw = [
- dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
- dict(type='Resize', scale=(1280, 1280), keep_ratio=True),
- dict(
- type='LoadOCRAnnotations',
- with_polygon=True,
- with_bbox=True,
- with_label=True),
- dict(
- type='PackTextDetInputs',
- meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor'))
-]
-
-# pipeline settings
-ctw1500_textdet_train.pipeline = _base_.train_pipeline
-ctw1500_textdet_test.pipeline = test_pipeline_ctw
-
-train_dataloader = dict(
- batch_size=16,
- num_workers=8,
- persistent_workers=False,
- sampler=dict(type='DefaultSampler', shuffle=True),
- dataset=ctw1500_textdet_train)
-
-val_dataloader = dict(
- batch_size=1,
- num_workers=1,
- persistent_workers=False,
- sampler=dict(type='DefaultSampler', shuffle=False),
- dataset=ctw1500_textdet_test)
-
-test_dataloader = val_dataloader
-
-auto_scale_lr = dict(base_batch_size=64 * 4)
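
For reference, a quick way to inspect a merged config like the one above, assuming you run it from an MMOCR 1.x checkout where the `_base_` files it inherits from exist (a sketch, not part of this repo):

```py
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/textdet/psenet/psenet_resnet50_fpnf_600e_ctw1500.py')

print(cfg.optim_wrapper.optimizer.lr)     # 0.0001
print(cfg.train_dataloader.batch_size)    # 16
print(cfg.param_scheduler[0].milestones)  # [200, 400]
```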
diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/albert/README.md b/spaces/NCTCMumbai/NCTC/models/official/nlp/albert/README.md
deleted file mode 100644
index cfb726c90ef9a638d5fd0485e341c232a86bdac2..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/nlp/albert/README.md
+++ /dev/null
@@ -1,332 +0,0 @@
-# ALBERT (ALBERT: A Lite BERT for Self-supervised Learning of Language Representations)
-
-The academic paper which describes ALBERT in detail and provides full results on
-a number of tasks can be found here: https://arxiv.org/abs/1909.11942.
-
-This repository contains TensorFlow 2.x implementation for ALBERT.
-
-## Contents
- * [Contents](#contents)
- * [Pre-trained Models](#pre-trained-models)
- * [Restoring from Checkpoints](#restoring-from-checkpoints)
- * [Set Up](#set-up)
- * [Process Datasets](#process-datasets)
- * [Fine-tuning with ALBERT](#fine-tuning-with-albert)
- * [Cloud GPUs and TPUs](#cloud-gpus-and-tpus)
- * [Sentence and Sentence-pair Classification Tasks](#sentence-and-sentence-pair-classification-tasks)
- * [SQuAD 1.1](#squad-11)
-
-
-## Pre-trained Models
-
-We released both checkpoints and tf.hub modules as the pretrained models for
-fine-tuning. They are TF 2.x compatible and are converted from the ALBERT v2
-checkpoints released in TF 1.x official ALBERT repository
-[google-research/albert](https://github.com/google-research/albert)
-in order to keep consistent with ALBERT paper.
-
-Our current released checkpoints are exactly the same as TF 1.x official ALBERT
-repository.
-
-### Access to Pretrained Checkpoints
-
-Pretrained checkpoints can be found in the following links:
-
-**Note: We implemented ALBERT using Keras functional-style networks in [nlp/modeling](../modeling).
-The TF 2.x-compatible ALBERT V2 checkpoints are:**
-
-* **[`ALBERT V2 Base`](https://storage.googleapis.com/cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base.tar.gz)**:
- 12-layer, 768-hidden, 12-heads, 12M parameters
-* **[`ALBERT V2 Large`](https://storage.googleapis.com/cloud-tpu-checkpoints/albert/checkpoints/albert_v2_large.tar.gz)**:
- 24-layer, 1024-hidden, 16-heads, 18M parameters
-* **[`ALBERT V2 XLarge`](https://storage.googleapis.com/cloud-tpu-checkpoints/albert/checkpoints/albert_v2_xlarge.tar.gz)**:
- 24-layer, 2048-hidden, 32-heads, 60M parameters
-* **[`ALBERT V2 XXLarge`](https://storage.googleapis.com/cloud-tpu-checkpoints/albert/checkpoints/albert_v2_xxlarge.tar.gz)**:
- 12-layer, 4096-hidden, 64-heads, 235M parameters
-
-We recommend hosting checkpoints in Google Cloud Storage buckets when you use
-Cloud GPUs/TPUs.
-
-### Restoring from Checkpoints
-
-`tf.train.Checkpoint` is used to manage model checkpoints in TF 2. To restore
-weights from provided pre-trained checkpoints, you can use the following code:
-
-```python
-init_checkpoint = 'the pretrained model checkpoint path.'
-model = tf.keras.Model()  # ALBERT pre-trained model as feature extractor.
-checkpoint = tf.train.Checkpoint(model=model)
-checkpoint.restore(init_checkpoint)
-```
-
-Checkpoints featuring native serialized Keras models
-(i.e. model.load()/load_weights()) will be available soon.
-
-### Access to Pretrained Hub Modules
-
-Pretrained tf.hub modules in TF 2.x SavedModel format can be found in the
-following links:
-
-* **[`ALBERT V2 Base`](https://tfhub.dev/tensorflow/albert_en_base/1)**:
- 12-layer, 768-hidden, 12-heads, 12M parameters
-* **[`ALBERT V2 Large`](https://tfhub.dev/tensorflow/albert_en_large/1)**:
- 24-layer, 1024-hidden, 16-heads, 18M parameters
-* **[`ALBERT V2 XLarge`](https://tfhub.dev/tensorflow/albert_en_xlarge/1)**:
- 24-layer, 2048-hidden, 32-heads, 60M parameters
-* **[`ALBERT V2 XXLarge`](https://tfhub.dev/tensorflow/albert_en_xxlarge/1)**:
- 12-layer, 4096-hidden, 64-heads, 235M parameters
-
-## Set Up
-
-```shell
-export PYTHONPATH="$PYTHONPATH:/path/to/models"
-```
-
-Install `tf-nightly` to get the latest updates:
-
-```shell
-pip install tf-nightly-gpu
-```
-
-When using a TPU, GPU support is not necessary. First, create a `tf-nightly`
-TPU with the [ctpu tool](https://github.com/tensorflow/tpu/tree/master/tools/ctpu):
-
-```shell
-ctpu up -name <your-tpu-name> --tf-version="nightly"
-```
-
-Second, you need to install TF 2 `tf-nightly` on your VM:
-
-```shell
-pip install tf-nightly
-```
-
-Warning: more detailed TPU-specific set-up instructions and tutorials will come
-along with the official TF 2.x release for TPU. Note that this repo is not
-officially supported by the Google Cloud TPU team until TF 2.1 is released.
-
-## Process Datasets
-
-### Pre-training
-
-Pre-training ALBERT with TF 2.x will come soon.
-For now, please use [ALBERT research repo](https://github.com/google-research/ALBERT)
-to pretrain the model and convert the checkpoint to TF2.x compatible ones using
-[tf2_albert_encoder_checkpoint_converter.py](tf2_albert_encoder_checkpoint_converter.py).
-
-
-
-### Fine-tuning
-
-To prepare the fine-tuning data for final model training, use the
-[`../data/create_finetuning_data.py`](../data/create_finetuning_data.py) script.
-Note that, unlike BERT models, which use a WordPiece tokenizer, ALBERT models
-employ a SentencePiece tokenizer, so the flag `tokenizer_impl` has to be set
-to 'sentence_piece'.
-The resulting datasets in `tf_record` format and the training metadata should
-later be passed to the training or evaluation scripts. The task-specific
-arguments are described in the following sections:
-
-* GLUE
-
-Users can download the
-[GLUE data](https://gluebenchmark.com/tasks) by running
-[this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
-and unpack it to some directory `$GLUE_DIR`.
-
-```shell
-export GLUE_DIR=~/glue
-export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base
-
-export TASK_NAME=MNLI
-export OUTPUT_DIR=gs://some_bucket/datasets
-python ../data/create_finetuning_data.py \
- --input_data_dir=${GLUE_DIR}/${TASK_NAME}/ \
- --sp_model_file=${ALBERT_DIR}/30k-clean.model \
- --train_data_output_path=${OUTPUT_DIR}/${TASK_NAME}_train.tf_record \
- --eval_data_output_path=${OUTPUT_DIR}/${TASK_NAME}_eval.tf_record \
- --meta_data_file_path=${OUTPUT_DIR}/${TASK_NAME}_meta_data \
- --fine_tuning_task_type=classification --max_seq_length=128 \
- --classification_task_name=${TASK_NAME} \
- --tokenizer_impl=sentence_piece
-```
-
-* SQUAD
-
-The [SQuAD website](https://rajpurkar.github.io/SQuAD-explorer/) contains
-detailed information about the SQuAD datasets and evaluation.
-
-The necessary files can be found here:
-
-* [train-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json)
-* [dev-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json)
-* [evaluate-v1.1.py](https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py)
-* [train-v2.0.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json)
-* [dev-v2.0.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json)
-* [evaluate-v2.0.py](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/)
-
-```shell
-export SQUAD_DIR=~/squad
-export SQUAD_VERSION=v1.1
-export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base
-export OUTPUT_DIR=gs://some_bucket/datasets
-
-python ../data/create_finetuning_data.py \
- --squad_data_file=${SQUAD_DIR}/train-${SQUAD_VERSION}.json \
- --sp_model_file=${ALBERT_DIR}/30k-clean.model \
- --train_data_output_path=${OUTPUT_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
- --meta_data_file_path=${OUTPUT_DIR}/squad_${SQUAD_VERSION}_meta_data \
- --fine_tuning_task_type=squad --max_seq_length=384 \
- --tokenizer_impl=sentence_piece
-```
-
-## Fine-tuning with ALBERT
-
-### Cloud GPUs and TPUs
-
-* Cloud Storage
-
-The unzipped pre-trained model files can also be found in the Google Cloud
-Storage folder `gs://cloud-tpu-checkpoints/albert/checkpoints`. For example:
-
-```shell
-export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base
-export MODEL_DIR=gs://some_bucket/my_output_dir
-```
-
-Currently, users are able to access `tf-nightly` TPUs, and the following TPU
-script should run with `tf-nightly`.
-
-* GPU -> TPU
-
-Just add the following flags to `run_classifier.py` or `run_squad.py`:
-
-```shell
- --distribution_strategy=tpu
- --tpu=grpc://${TPU_IP_ADDRESS}:8470
-```
-
-### Sentence and Sentence-pair Classification Tasks
-
-This example code fine-tunes `albert_v2_base` on the Microsoft Research
-Paraphrase Corpus (MRPC), which contains only 3,600 examples, so fine-tuning
-takes just a few minutes on most GPUs.
-
-We use the `albert_v2_base` as an example throughout the
-workflow.
-
-
-```shell
-export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base
-export MODEL_DIR=gs://some_bucket/my_output_dir
-export GLUE_DIR=gs://some_bucket/datasets
-export TASK=MRPC
-
-python run_classifier.py \
- --mode='train_and_eval' \
- --input_meta_data_path=${GLUE_DIR}/${TASK}_meta_data \
- --train_data_path=${GLUE_DIR}/${TASK}_train.tf_record \
- --eval_data_path=${GLUE_DIR}/${TASK}_eval.tf_record \
- --bert_config_file=${ALBERT_DIR}/albert_config.json \
- --init_checkpoint=${ALBERT_DIR}/bert_model.ckpt \
- --train_batch_size=4 \
- --eval_batch_size=4 \
- --steps_per_loop=1 \
- --learning_rate=2e-5 \
- --num_train_epochs=3 \
- --model_dir=${MODEL_DIR} \
- --distribution_strategy=mirrored
-```
-
-Alternatively, instead of specifying `init_checkpoint`, you can specify
-`hub_module_url` to employ a pretrained ALBERT hub module, e.g.,
-` --hub_module_url=https://tfhub.dev/tensorflow/albert_en_base/1`.
-
-To use a TPU, you only need to switch the distribution strategy type to `tpu`,
-provide the TPU information, and use remote storage for model checkpoints.
-
-```shell
-export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base
-export TPU_IP_ADDRESS='???'
-export MODEL_DIR=gs://some_bucket/my_output_dir
-export GLUE_DIR=gs://some_bucket/datasets
-
-python run_classifier.py \
- --mode='train_and_eval' \
- --input_meta_data_path=${GLUE_DIR}/${TASK}_meta_data \
- --train_data_path=${GLUE_DIR}/${TASK}_train.tf_record \
- --eval_data_path=${GLUE_DIR}/${TASK}_eval.tf_record \
- --bert_config_file=$ALBERT_DIR/albert_config.json \
- --init_checkpoint=$ALBERT_DIR/bert_model.ckpt \
- --train_batch_size=32 \
- --eval_batch_size=32 \
- --learning_rate=2e-5 \
- --num_train_epochs=3 \
- --model_dir=${MODEL_DIR} \
- --distribution_strategy=tpu \
- --tpu=grpc://${TPU_IP_ADDRESS}:8470
-```
-
-### SQuAD 1.1
-
-The Stanford Question Answering Dataset (SQuAD) is a popular question answering
-benchmark dataset. See more in [SQuAD website](https://rajpurkar.github.io/SQuAD-explorer/).
-
-We use the `albert_v2_base` as an example throughout the
-workflow.
-
-```shell
-export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base
-export SQUAD_DIR=gs://some_bucket/datasets
-export MODEL_DIR=gs://some_bucket/my_output_dir
-export SQUAD_VERSION=v1.1
-
-python run_squad.py \
- --input_meta_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_meta_data \
- --train_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
- --predict_file=${SQUAD_DIR}/dev-v1.1.json \
- --sp_model_file=${ALBERT_DIR}/30k-clean.model \
- --bert_config_file=$ALBERT_DIR/albert_config.json \
- --init_checkpoint=$ALBERT_DIR/bert_model.ckpt \
- --train_batch_size=4 \
- --predict_batch_size=4 \
- --learning_rate=8e-5 \
- --num_train_epochs=2 \
- --model_dir=${MODEL_DIR} \
- --distribution_strategy=mirrored
-```
-
-Similarly, you can replace the `init_checkpoint` flag with `hub_module_url` to
-specify a hub module path.
-
-To use a TPU, you need to switch the distribution strategy type to `tpu` and
-provide the TPU information.
-
-```shell
-export ALBERT_DIR=gs://cloud-tpu-checkpoints/albert/checkpoints/albert_v2_base
-export TPU_IP_ADDRESS='???'
-export MODEL_DIR=gs://some_bucket/my_output_dir
-export SQUAD_DIR=gs://some_bucket/datasets
-export SQUAD_VERSION=v1.1
-
-python run_squad.py \
- --input_meta_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_meta_data \
- --train_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
- --predict_file=${SQUAD_DIR}/dev-v1.1.json \
- --sp_model_file=${ALBERT_DIR}/30k-clean.model \
- --bert_config_file=$ALBERT_DIR/albert_config.json \
- --init_checkpoint=$ALBERT_DIR/bert_model.ckpt \
- --train_batch_size=32 \
- --learning_rate=8e-5 \
- --num_train_epochs=2 \
- --model_dir=${MODEL_DIR} \
- --distribution_strategy=tpu \
- --tpu=grpc://${TPU_IP_ADDRESS}:8470
-```
-
-The dev set predictions will be saved into a file called predictions.json in the
-model_dir:
-
-```shell
-python $SQUAD_DIR/evaluate-v1.1.py $SQUAD_DIR/dev-v1.1.json ./squad/predictions.json
-```
diff --git a/spaces/NNDM/img-to-music/share_btn.py b/spaces/NNDM/img-to-music/share_btn.py
deleted file mode 100644
index 1a2ac6a6e74b114dbd54c2f24723a87180db51ef..0000000000000000000000000000000000000000
--- a/spaces/NNDM/img-to-music/share_btn.py
+++ /dev/null
@@ -1,100 +0,0 @@
-community_icon_html = """"""
-
-loading_icon_html = """"""
-
-share_js = """async () => {
- async function uploadFile(file){
- const UPLOAD_URL = 'https://huggingface.co/uploads';
- const response = await fetch(UPLOAD_URL, {
- method: 'POST',
- headers: {
- 'Content-Type': file.type,
- 'X-Requested-With': 'XMLHttpRequest',
- },
- body: file, /// <- File inherits from Blob
- });
- const url = await response.text();
- return url;
- }
- async function getInputImgFile(imgEl){
- const res = await fetch(imgEl.src);
- const blob = await res.blob();
- const imgId = Date.now() % 200;
- const isPng = imgEl.src.startsWith(`data:image/png`);
- if(isPng){
-      const fileName = `sd-perception-${imgId}.png`;
- return new File([blob], fileName, { type: 'image/png' });
- }else{
-      const fileName = `sd-perception-${imgId}.jpg`;
- return new File([blob], fileName, { type: 'image/jpeg' });
- }
- }
- async function getOutputMusicFile(audioEL){
- const res = await fetch(audioEL.src);
- const blob = await res.blob();
- const audioId = Date.now() % 200;
-    const fileName = `img-to-music-${audioId}.wav`;
- const musicBlob = new File([blob], fileName, { type: 'audio/wav' });
- console.log(musicBlob);
- return musicBlob;
- }
-
- async function audioToBase64(audioFile) {
- return new Promise((resolve, reject) => {
- let reader = new FileReader();
- reader.readAsDataURL(audioFile);
- reader.onload = () => resolve(reader.result);
- reader.onerror = error => reject(error);
-
- });
- }
- const gradioEl = document.querySelector('body > gradio-app');
- // const gradioEl = document.querySelector("gradio-app").shadowRoot;
- const inputImgEl = gradioEl.querySelector('#input-img img');
- const outputMusic = gradioEl.querySelector('#music-output audio');
- const outputMusic_src = gradioEl.querySelector('#music-output audio').src;
- const outputMusic_name = outputMusic_src.split('/').pop();
- let titleTxt = outputMusic_name;
- //if(titleTxt.length > 100){
- // titleTxt = titleTxt.slice(0, 100) + ' ...';
- //}
- const shareBtnEl = gradioEl.querySelector('#share-btn');
- const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
- const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
- if(!outputMusic){
- return;
- };
- shareBtnEl.style.pointerEvents = 'none';
- shareIconEl.style.display = 'none';
- loadingIconEl.style.removeProperty('display');
- const inputFile = await getInputImgFile(inputImgEl);
- const urlInputImg = await uploadFile(inputFile);
- const musicFile = await getOutputMusicFile(outputMusic);
- const dataOutputMusic = await uploadFile(musicFile);
-
-    const descriptionMd = `#### Input img:
-<img src='${urlInputImg}' style='max-height: 350px;'>
-
-#### Music:
-
-<audio controls src='${dataOutputMusic}'></audio>
-`;
- const params = new URLSearchParams({
- title: titleTxt,
- description: descriptionMd,
- });
- const paramsStr = params.toString();
- window.open(`https://huggingface.co/spaces/fffiloni/img-to-music/discussions/new?${paramsStr}`, '_blank');
- shareBtnEl.style.removeProperty('pointer-events');
- shareIconEl.style.removeProperty('display');
- loadingIconEl.style.display = 'none';
-}"""
\ No newline at end of file
diff --git a/spaces/NeuralInternet/Text-Generation_Playground/modules/extensions.py b/spaces/NeuralInternet/Text-Generation_Playground/modules/extensions.py
deleted file mode 100644
index c8de8a7bc9ebd331d65704996a764e7cc279a6e5..0000000000000000000000000000000000000000
--- a/spaces/NeuralInternet/Text-Generation_Playground/modules/extensions.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import extensions
-import modules.shared as shared
-
-state = {}
-available_extensions = []
-
-def load_extensions():
- global state
- for i, name in enumerate(shared.args.extensions):
- if name in available_extensions:
- print(f'Loading the extension "{name}"... ', end='')
- exec(f"import extensions.{name}.script")
- state[name] = [True, i]
- print('Ok.')
-
-# This iterator returns the extensions in the order specified in the command-line
-def iterator():
- for name in sorted(state, key=lambda x : state[x][1]):
- if state[name][0] == True:
- yield eval(f"extensions.{name}.script"), name
-
-# Extension functions that map string -> string
-def apply_extensions(text, typ):
- for extension, _ in iterator():
- if typ == "input" and hasattr(extension, "input_modifier"):
- text = extension.input_modifier(text)
- elif typ == "output" and hasattr(extension, "output_modifier"):
- text = extension.output_modifier(text)
- elif typ == "bot_prefix" and hasattr(extension, "bot_prefix_modifier"):
- text = extension.bot_prefix_modifier(text)
- return text
-
-def create_extensions_block():
- # Updating the default values
- for extension, name in iterator():
- if hasattr(extension, 'params'):
- for param in extension.params:
- _id = f"{name}-{param}"
- if _id in shared.settings:
- extension.params[param] = shared.settings[_id]
-
- # Creating the extension ui elements
- for extension, name in iterator():
- if hasattr(extension, "ui"):
- extension.ui()
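
For context, the loader above relies purely on duck typing: it imports `extensions.<name>.script` and calls whichever hooks exist. A minimal, hypothetical extension that would satisfy it (the file name and behaviour are illustrative; enabling it via the webui's `--extensions example` flag is an assumption about the CLI):

```py
# extensions/example/script.py  (hypothetical)

params = {
    "suffix": " [processed by example extension]",
}

def input_modifier(text):
    # Applied to the user's input before it reaches the model.
    return text

def output_modifier(text):
    # Applied to the model's output before it is displayed.
    return text + params["suffix"]

def bot_prefix_modifier(text):
    # Lets the extension adjust the bot's reply prefix in chat mode.
    return text

def ui():
    # Optional: build extra Gradio components when create_extensions_block() runs.
    pass
```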
diff --git a/spaces/NikeZoldyck/green-screen-composition-transfer/models/models.py b/spaces/NikeZoldyck/green-screen-composition-transfer/models/models.py
deleted file mode 100644
index 94873f8385e7780d1a0a17ee247bba0a917e9140..0000000000000000000000000000000000000000
--- a/spaces/NikeZoldyck/green-screen-composition-transfer/models/models.py
+++ /dev/null
@@ -1,297 +0,0 @@
-"""
-Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
-Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
-"""
-import torch.nn as nn
-
-
-class VGGEncoder(nn.Module):
- def __init__(self, level):
- super(VGGEncoder, self).__init__()
- self.level = level
-
- # 224 x 224
- self.conv0 = nn.Conv2d(3, 3, 1, 1, 0)
-
- self.pad1_1 = nn.ReflectionPad2d((1, 1, 1, 1))
- # 226 x 226
- self.conv1_1 = nn.Conv2d(3, 64, 3, 1, 0)
- self.relu1_1 = nn.ReLU(inplace=True)
- # 224 x 224
-
- if level < 2: return
-
- self.pad1_2 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 0)
- self.relu1_2 = nn.ReLU(inplace=True)
- # 224 x 224
- self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
- # 112 x 112
-
- self.pad2_1 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv2_1 = nn.Conv2d(64, 128, 3, 1, 0)
- self.relu2_1 = nn.ReLU(inplace=True)
- # 112 x 112
-
- if level < 3: return
-
- self.pad2_2 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 0)
- self.relu2_2 = nn.ReLU(inplace=True)
- # 112 x 112
-
- self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
- # 56 x 56
-
- self.pad3_1 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv3_1 = nn.Conv2d(128, 256, 3, 1, 0)
- self.relu3_1 = nn.ReLU(inplace=True)
- # 56 x 56
-
- if level < 4: return
-
- self.pad3_2 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
- self.relu3_2 = nn.ReLU(inplace=True)
- # 56 x 56
-
- self.pad3_3 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
- self.relu3_3 = nn.ReLU(inplace=True)
- # 56 x 56
-
- self.pad3_4 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 0)
- self.relu3_4 = nn.ReLU(inplace=True)
- # 56 x 56
-
- self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
- # 28 x 28
-
- self.pad4_1 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv4_1 = nn.Conv2d(256, 512, 3, 1, 0)
- self.relu4_1 = nn.ReLU(inplace=True)
- # 28 x 28
-
- def forward(self, x):
- out = self.conv0(x)
-
- out = self.pad1_1(out)
- out = self.conv1_1(out)
- out = self.relu1_1(out)
-
- if self.level < 2:
- return out
-
- out = self.pad1_2(out)
- out = self.conv1_2(out)
- pool1 = self.relu1_2(out)
-
- out, pool1_idx = self.maxpool1(pool1)
-
- out = self.pad2_1(out)
- out = self.conv2_1(out)
- out = self.relu2_1(out)
-
- if self.level < 3:
- return out, pool1_idx, pool1.size()
-
- out = self.pad2_2(out)
- out = self.conv2_2(out)
- pool2 = self.relu2_2(out)
-
- out, pool2_idx = self.maxpool2(pool2)
-
- out = self.pad3_1(out)
- out = self.conv3_1(out)
- out = self.relu3_1(out)
-
- if self.level < 4:
- return out, pool1_idx, pool1.size(), pool2_idx, pool2.size()
-
- out = self.pad3_2(out)
- out = self.conv3_2(out)
- out = self.relu3_2(out)
-
- out = self.pad3_3(out)
- out = self.conv3_3(out)
- out = self.relu3_3(out)
-
- out = self.pad3_4(out)
- out = self.conv3_4(out)
- pool3 = self.relu3_4(out)
- out, pool3_idx = self.maxpool3(pool3)
-
- out = self.pad4_1(out)
- out = self.conv4_1(out)
- out = self.relu4_1(out)
-
- return out, pool1_idx, pool1.size(), pool2_idx, pool2.size(), pool3_idx, pool3.size()
-
- def forward_multiple(self, x):
- out = self.conv0(x)
-
- out = self.pad1_1(out)
- out = self.conv1_1(out)
- out = self.relu1_1(out)
-
- if self.level < 2: return out
-
- out1 = out
-
- out = self.pad1_2(out)
- out = self.conv1_2(out)
- pool1 = self.relu1_2(out)
-
- out, pool1_idx = self.maxpool1(pool1)
-
- out = self.pad2_1(out)
- out = self.conv2_1(out)
- out = self.relu2_1(out)
-
- if self.level < 3: return out, out1
-
- out2 = out
-
- out = self.pad2_2(out)
- out = self.conv2_2(out)
- pool2 = self.relu2_2(out)
-
- out, pool2_idx = self.maxpool2(pool2)
-
- out = self.pad3_1(out)
- out = self.conv3_1(out)
- out = self.relu3_1(out)
-
- if self.level < 4: return out, out2, out1
-
- out3 = out
-
- out = self.pad3_2(out)
- out = self.conv3_2(out)
- out = self.relu3_2(out)
-
- out = self.pad3_3(out)
- out = self.conv3_3(out)
- out = self.relu3_3(out)
-
- out = self.pad3_4(out)
- out = self.conv3_4(out)
- pool3 = self.relu3_4(out)
- out, pool3_idx = self.maxpool3(pool3)
-
- out = self.pad4_1(out)
- out = self.conv4_1(out)
- out = self.relu4_1(out)
-
- return out, out3, out2, out1
-
-
-class VGGDecoder(nn.Module):
- def __init__(self, level):
- super(VGGDecoder, self).__init__()
- self.level = level
-
- if level > 3:
- self.pad4_1 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv4_1 = nn.Conv2d(512, 256, 3, 1, 0)
- self.relu4_1 = nn.ReLU(inplace=True)
- # 28 x 28
-
- self.unpool3 = nn.MaxUnpool2d(kernel_size=2, stride=2)
- # 56 x 56
-
- self.pad3_4 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 0)
- self.relu3_4 = nn.ReLU(inplace=True)
- # 56 x 56
-
- self.pad3_3 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
- self.relu3_3 = nn.ReLU(inplace=True)
- # 56 x 56
-
- self.pad3_2 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
- self.relu3_2 = nn.ReLU(inplace=True)
- # 56 x 56
-
- if level > 2:
- self.pad3_1 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv3_1 = nn.Conv2d(256, 128, 3, 1, 0)
- self.relu3_1 = nn.ReLU(inplace=True)
- # 56 x 56
-
- self.unpool2 = nn.MaxUnpool2d(kernel_size=2, stride=2)
- # 112 x 112
-
- self.pad2_2 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 0)
- self.relu2_2 = nn.ReLU(inplace=True)
- # 112 x 112
-
- if level > 1:
- self.pad2_1 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv2_1 = nn.Conv2d(128, 64, 3, 1, 0)
- self.relu2_1 = nn.ReLU(inplace=True)
- # 112 x 112
-
- self.unpool1 = nn.MaxUnpool2d(kernel_size=2, stride=2)
- # 224 x 224
-
- self.pad1_2 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 0)
- self.relu1_2 = nn.ReLU(inplace=True)
- # 224 x 224
-
- if level > 0:
- self.pad1_1 = nn.ReflectionPad2d((1, 1, 1, 1))
- self.conv1_1 = nn.Conv2d(64, 3, 3, 1, 0)
-
- def forward(self, x, pool1_idx=None, pool1_size=None, pool2_idx=None, pool2_size=None, pool3_idx=None,
- pool3_size=None):
- out = x
-
- if self.level > 3:
- out = self.pad4_1(out)
- out = self.conv4_1(out)
- out = self.relu4_1(out)
- out = self.unpool3(out, pool3_idx, output_size=pool3_size)
-
- out = self.pad3_4(out)
- out = self.conv3_4(out)
- out = self.relu3_4(out)
-
- out = self.pad3_3(out)
- out = self.conv3_3(out)
- out = self.relu3_3(out)
-
- out = self.pad3_2(out)
- out = self.conv3_2(out)
- out = self.relu3_2(out)
-
- if self.level > 2:
- out = self.pad3_1(out)
- out = self.conv3_1(out)
- out = self.relu3_1(out)
- out = self.unpool2(out, pool2_idx, output_size=pool2_size)
-
- out = self.pad2_2(out)
- out = self.conv2_2(out)
- out = self.relu2_2(out)
-
- if self.level > 1:
- out = self.pad2_1(out)
- out = self.conv2_1(out)
- out = self.relu2_1(out)
- out = self.unpool1(out, pool1_idx, output_size=pool1_size)
-
- out = self.pad1_2(out)
- out = self.conv1_2(out)
- out = self.relu1_2(out)
-
- if self.level > 0:
- out = self.pad1_1(out)
- out = self.conv1_1(out)
-
- return out
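
For reference, a shape-level sketch of how the encoder/decoder pair above fits together at `level=3` (random weights, illustration only; the import path assumes this Space's `models/models.py`):

```py
import torch
from models.models import VGGEncoder, VGGDecoder  # assumed import path

encoder = VGGEncoder(level=3)
decoder = VGGDecoder(level=3)

x = torch.randn(1, 3, 224, 224)
feat, pool1_idx, pool1_size, pool2_idx, pool2_size = encoder(x)
print(feat.shape)  # torch.Size([1, 256, 56, 56])

out = decoder(feat, pool1_idx, pool1_size, pool2_idx, pool2_size)
print(out.shape)   # torch.Size([1, 3, 224, 224])
```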
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/simultaneous_translation/tests/test_text_models.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/simultaneous_translation/tests/test_text_models.py
deleted file mode 100644
index 127adfa6337333ba5ae598fcd158956def0d520f..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/simultaneous_translation/tests/test_text_models.py
+++ /dev/null
@@ -1,407 +0,0 @@
-import argparse
-import unittest
-from typing import Any, Dict
-
-import torch
-from examples.simultaneous_translation.models import (
- transformer_monotonic_attention
-)
-
-
-from tests.test_roberta import FakeTask
-
-
-DEFAULT_CONFIG = {
- "attention_eps": 1e-6,
- "mass_preservation": True,
- "noise_type": "flat",
- "noise_mean": 0.0,
- "noise_var": 1.0,
- "energy_bias_init": -2,
- "energy_bias": True
-}
-
-
-PAD_INDEX = 1
-
-
-def generate_config(overrides_kv):
- new_dict = {key: value for key, value in DEFAULT_CONFIG.items()}
- for key, value in overrides_kv.items():
- new_dict[key] = value
- return new_dict
-
-
-def make_sample_with_padding(longer_src=False) -> Dict[str, Any]:
- tokens_1 = torch.LongTensor(
- [
- [2, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15, 2],
- [
- 2, 11, 12, 14, 15, 10, 11, 12, 13, 14, 15, 2,
- PAD_INDEX, PAD_INDEX
- ],
- ]
- )
- tokens_2 = torch.LongTensor(
- [
- [2, 11, 12, 13, 14, 2, PAD_INDEX, PAD_INDEX],
- [2, 11, 22, 33, 2, PAD_INDEX, PAD_INDEX, PAD_INDEX]
- ]
- )
- if longer_src:
- src_tokens = tokens_1[:, 1:]
- prev_output_tokens = tokens_2
- else:
- src_tokens = tokens_2[:, 1:8]
- prev_output_tokens = tokens_1
-
- src_lengths = src_tokens.ne(PAD_INDEX).sum(dim=1).long()
-
- sample = {
- "net_input": {
- "src_tokens": src_tokens,
- "prev_output_tokens": prev_output_tokens,
- "src_lengths": src_lengths,
- },
- "target": prev_output_tokens[:, 1:],
- }
- return sample
-
-
-def build_transformer_monotonic_attention(**extra_args: Any):
- overrides = {
- # Use characteristic test dimensions
- "encoder_embed_dim": 12,
- "encoder_ffn_embed_dim": 14,
- "decoder_embed_dim": 12,
- "decoder_ffn_embed_dim": 14,
- # Disable dropout so we have comparable tests.
- "dropout": 0,
- "attention_dropout": 0,
- "activation_dropout": 0,
- "encoder_layerdrop": 0,
- }
- overrides.update(extra_args)
- # Overrides the defaults from the parser
- args = argparse.Namespace(**overrides)
- transformer_monotonic_attention.monotonic_tiny_architecture(args)
-
- torch.manual_seed(0)
- task = FakeTask(args)
- return (
- transformer_monotonic_attention
- .TransformerModelSimulTrans
- .build_model(args, task)
- )
-
-
-def expected_alignment_formula(
- p_choose,
- mass_perservation=True,
- padding_mask=None
-):
- # Online and Linear-Time Attention by Enforcing Monotonic Alignments
- # https://arxiv.org/pdf/1704.00784.pdf
- # Eq 18, 19
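- # The nested loops below compute the recursion directly:
- #   alpha[i, j] = p[i, j] * sum_{k <= j} alpha[i-1, k] * prod_{l=k}^{j-1} (1 - p[i, l])
- # with alpha[0, j] = p[0, j] * prod_{l < j} (1 - p[0, l]).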
- bsz, tgt_len, src_len = p_choose.size()
- alpha = torch.zeros_like(p_choose)
-
- if padding_mask is not None:
- bsz_pad = padding_mask.size(0)
- num_heads = int(bsz / bsz_pad)
- padding_mask = (
- padding_mask
- .unsqueeze(1)
- .expand([bsz_pad, num_heads, src_len])
- .contiguous()
- .view(-1, src_len)
- )
-
- p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0)
-
- for bsz_i in range(bsz):
- for i in range(tgt_len):
- for j in range(src_len):
- if i == 0:
- if j == 0:
- # First source token
- alpha[bsz_i, i, j] = p_choose[bsz_i, i, j]
- else:
- # First target token
- alpha[bsz_i, i, j] = (
- p_choose[bsz_i, i, j]
- * torch.prod(
- 1 - p_choose[bsz_i, i, :j]
- )
- )
- else:
- alpha[bsz_i, i, j] = alpha[bsz_i, i - 1, j]
- for k in range(j):
- alpha[bsz_i, i, j] += (
- alpha[bsz_i, i - 1, k]
- * torch.prod(
- 1 - p_choose[bsz_i, i, k:j]
- )
- )
- alpha[bsz_i, i, j] *= p_choose[bsz_i, i, j]
-
- alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0)
-
- if mass_perservation:
- alpha = mass_perservation_formula(alpha, False, padding_mask)
-
- return alpha
-
-
-def mass_perservation_formula(alpha, left_padding=False, padding_mask=None):
- if padding_mask is None or alpha.size(-1) == 1:
- if alpha.size(-1) > 1:
- alpha[:, :, -1] = 1 - alpha[:, :, :-1].sum(dim=-1)
- return alpha
-
- src_lens = (padding_mask.logical_not()).sum(dim=1).long()
-
- bsz, tgt_len, src_len = alpha.size()
-
- assert (
- not left_padding
- or (left_padding and (not padding_mask[:, 0].any()))
- )
-
- alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0)
-
- for bsz_i in range(bsz):
- if left_padding:
- alpha[bsz_i, :, -1] = (
- 1 - alpha[bsz_i, :, :-1].sum(dim=-1)
- )
- else:
- alpha[bsz_i, :, src_lens[bsz_i] - 1] = (
- 1 - alpha[bsz_i, :, :src_lens[bsz_i] - 1].sum(dim=-1)
- )
-
- return alpha
-
-
-def expected_soft_attention_formula(
- alpha,
- soft_energy,
- padding_mask=None,
- chunksize=1e10,
-):
- # Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
- # https://arxiv.org/pdf/1906.05218.pdf
- # Eq 14
-
- # Monotonic Chunkwise Attention
- # https://arxiv.org/abs/1712.05382
- # Eq 17
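- # With window size w = chunksize, the brute-force loops below compute
- #   beta[i, j] = sum_{k=j}^{min(src_len, j+w)-1} alpha[i, k] * exp(u[i, j]) / sum_{l=max(0, k-w+1)}^{k} exp(u[i, l])
- # where u is the soft energy (padded key positions are masked with -inf when a padding mask is given).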
- bsz, tgt_len, src_len = alpha.size()
- beta = torch.zeros_like(alpha)
-
- if padding_mask is not None:
- bsz_pad = padding_mask.size(0)
- num_heads = int(bsz / bsz_pad)
- # Expanding for potential head dimension
- padding_mask = (
- padding_mask
- .unsqueeze(1)
- .expand([bsz_pad, num_heads, src_len])
- .contiguous()
- .view(-1, src_len)
- )
- soft_energy = soft_energy.masked_fill(padding_mask.unsqueeze(1), float('-inf'))
-
- for bsz_i in range(bsz):
- for i in range(tgt_len):
- for j in range(src_len):
- for k in range(j, min([src_len, j + chunksize])):
- if not padding_mask[bsz_i, j]:
- beta[bsz_i, i, j] += (
- alpha[bsz_i, i, k] * torch.exp(soft_energy[bsz_i, i, j])
- / torch.sum(torch.exp(soft_energy[bsz_i, i, max([0, k - chunksize + 1]):k + 1]))
- )
- return beta
-
-
-class MonotonicAttentionTestAbstractClass(object):
- def test_forward(self):
- sample = make_sample_with_padding()
- out, _ = self.model.forward(**sample["net_input"])
- loss = out.sum()
- loss.backward()
-
- def test_p_choose(self):
- sample = make_sample_with_padding()
- _, extra_out = self.model.forward(**sample["net_input"])
- for item in extra_out.attn_list:
- p_choose = item["p_choose"]
- self.assertTrue(p_choose.le(1.0).all())
- self.assertTrue(p_choose.ge(0.0).all())
-
- def test_expected_alignment(self):
- for longer_src in [True, False]:
- sample = make_sample_with_padding(longer_src)
- _, extra_out = self.model.forward(**sample["net_input"])
- for item in extra_out.attn_list:
- p_choose = item["p_choose"]
- alpha_system = item["alpha"]
- self.assertTrue(p_choose.size() == alpha_system.size())
- bsz, num_head, tgt_len, src_len = alpha_system.size()
- alpha_system = alpha_system.view(-1, tgt_len, src_len)
- p_choose = p_choose.view(-1, tgt_len, src_len)
-
- alpha_real = expected_alignment_formula(
- p_choose,
- self.model.decoder.layers[0].encoder_attn.mass_preservation,
- sample["net_input"]["src_tokens"].eq(PAD_INDEX)
- )
-
- self.assertTrue(
- torch.abs(alpha_system - alpha_real).le(5e-5).all(),
- )
-
-
-class HardMonotonicAttentionTestCase(
- unittest.TestCase,
- MonotonicAttentionTestAbstractClass
-):
- def setUp(self):
- self.model = build_transformer_monotonic_attention(
- **generate_config({"simul_type": "hard_aligned"})
- )
-
-
-class InfiniteLookbackTestCase(
- unittest.TestCase,
- MonotonicAttentionTestAbstractClass
-):
- def setUp(self):
- self.model = build_transformer_monotonic_attention(
- **generate_config(
- {
- "simul_type": "infinite_lookback"
- }
- )
- )
- self.model.train()
-
- def test_fp16_for_long_input(self):
- sample = {
- "net_input": {
- "src_tokens": torch.LongTensor([7] * 1000 + [2]).cuda().unsqueeze(0),
- "prev_output_tokens": torch.LongTensor([7] * 1000 + [2]).cuda().unsqueeze(0),
- "src_lengths": torch.LongTensor([1000]).cuda(),
- },
- "target": torch.LongTensor([2] + [7] * 1000).unsqueeze(0).cuda()
- }
- self.model.cuda().half()
- _, extra_out = self.model.forward(**sample["net_input"])
- for item in extra_out.attn_list:
- for key in ["p_choose", "alpha", "beta", "soft_energy"]:
- self.assertFalse(torch.isnan(item[key]).any())
-
- def test_expected_attention(self):
- for longer_src in [True, False]:
- sample = make_sample_with_padding(longer_src)
- _, extra_out = self.model.forward(**sample["net_input"])
- for item in extra_out.attn_list:
- p_choose = item["p_choose"]
- alpha_system = item["alpha"]
- beta_system = item["beta"]
- soft_energy_system = item["soft_energy"]
- self.assertTrue(beta_system.size() == alpha_system.size())
- self.assertTrue(p_choose.size() == alpha_system.size())
-
- bsz, num_head, tgt_len, src_len = alpha_system.size()
-
- alpha_system = alpha_system.view(-1, tgt_len, src_len)
- beta_system = beta_system.view(-1, tgt_len, src_len)
- p_choose = p_choose.view(-1, tgt_len, src_len)
- soft_energy_system = soft_energy_system.view(-1, tgt_len, src_len)
-
- alpha_real = expected_alignment_formula(
- p_choose,
- self.model.decoder.layers[0].encoder_attn.mass_preservation,
- sample["net_input"]["src_tokens"].eq(PAD_INDEX)
- )
-
- beta_real = expected_soft_attention_formula(
- alpha_real,
- soft_energy_system,
- sample["net_input"]["src_tokens"].eq(PAD_INDEX),
- chunksize=getattr(
- self.model.decoder.layers[0].encoder_attn,
- "chunk_size",
- int(1e10)
- )
- )
-
- self.assertTrue(
- torch.abs(beta_system - beta_real).le(1e-5).all(),
- )
-
-
-class ChunkwiseTestCase(
- InfiniteLookbackTestCase
-):
- def setUp(self):
- self.model = build_transformer_monotonic_attention(
- **generate_config(
- {
- "simul_type": "chunkwise",
- "mocha_chunk_size": 3
- }
- )
- )
-
-
-class WaitkTestCase(InfiniteLookbackTestCase):
- def setUp(self):
- self.model = build_transformer_monotonic_attention(
- **generate_config(
- {
- "simul_type": "waitk",
- "waitk_lagging": 3,
- }
- )
- )
-
- def check_waitk(self, p_choose, lagging, padding_mask):
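- # Under the wait-k policy, p_choose should be a one-hot read at source
- # position j = i + lagging - 1 for each target step i (ignoring padded
- # source positions); the loops below verify this element-wise.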
- bsz, tgt_len, src_len = p_choose.size()
- for bsz_i in range(bsz):
- for i in range(tgt_len):
- for j in range(src_len):
- if not padding_mask[bsz_i, j]:
- if j - i == lagging - 1:
- self.assertTrue(p_choose[bsz_i, i, j] == 1)
- else:
- self.assertTrue(p_choose[bsz_i, i, j] == 0)
-
- def test_waitk_p_choose(self):
- for longer_src in [True, False]:
- for k in [1, 3, 10, 20, 100]:
- sample = make_sample_with_padding(longer_src)
- model = build_transformer_monotonic_attention(
- **generate_config(
- {
- "simul_type": "waitk",
- "waitk_lagging": k,
- }
- )
- )
- model.train()
- _, extra_out = model.forward(**sample["net_input"])
- for item in extra_out.attn_list:
- p_choose = item["p_choose"]
- bsz, num_heads, tgt_len, src_len = p_choose.size()
- padding_mask = sample["net_input"]["src_tokens"].eq(PAD_INDEX)
- padding_mask = (
- padding_mask
- .unsqueeze(1)
- .expand([bsz, num_heads, src_len])
- .contiguous()
- .view(-1, src_len)
- )
- p_choose = p_choose.view(bsz * num_heads, tgt_len, src_len)
- self.check_waitk(p_choose, k, padding_mask)
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/nonautoregressive_ensembles.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/nonautoregressive_ensembles.py
deleted file mode 100644
index 705a04fb49658c91114a26efd411b4653c65b943..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/nonautoregressive_ensembles.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-import torch
-import torch.nn.functional as F
-from fairseq.models.nat import (
- _apply_del_words,
- _apply_ins_masks,
- _apply_ins_words,
- _fill,
- _skip,
- _skip_encoder_out,
-)
-
-
-class _EnsembleModelEncoder(object):
- def __init__(self, models):
- self.models = models
-
- def reorder_encoder_out(self, encoder_outs, new_order):
- encoder_outs = [
- model.encoder.reorder_encoder_out(encoder_out, new_order)
- for model, encoder_out in zip(self.models, encoder_outs)
- ]
- return encoder_outs
-
-
-class BasicEnsembleModel(torch.nn.Module):
- """A wrapper around an ensemble of models."""
-
- def __init__(self, models):
- super().__init__()
- self.models = torch.nn.ModuleList(models)
- self.bos = self.models[0].decoder.dictionary.bos()
- self.eos = self.models[0].decoder.dictionary.eos()
- self.pad = self.models[0].decoder.dictionary.pad()
- self.unk = self.models[0].decoder.dictionary.unk()
- self.encoder = _EnsembleModelEncoder(self.models)
-
- def has_encoder(self):
- return hasattr(self.models[0], "encoder")
-
- def max_decoder_positions(self):
- return min(m.max_decoder_positions() for m in self.models)
-
- @torch.no_grad()
- def forward_encoder(self, encoder_input):
- if not self.has_encoder():
- return None
- return [model.forward_encoder(encoder_input) for model in self.models]
-
- @torch.no_grad()
- def forward_decoder(self, *inputs):
- raise NotImplementedError
-
- def initialize_output_tokens(self, *inputs):
- raise NotImplementedError
-
-
-class EnsembleLevT(BasicEnsembleModel):
- """A wrapper around an ensemble of models."""
-
- def __init__(self, models):
- super().__init__(models)
-
- @torch.no_grad()
- def forward_decoder(
- self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs
- ):
- # LevT ensembling
- # A pipeline of three steps: deletion, placeholder, and word insertion.
- # We need to average scores at each step sequentially, because later steps depend on earlier ones.
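- # Averaging log-probabilities over the N models in probability space:
- #   log((1/N) * sum_i exp(s_i)) = logsumexp(s_1..s_N) - log(N),
- # which is what the torch.logsumexp(...) - math.log(len(self.models)) calls below do.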
- # deletion
- output_tokens = decoder_out.output_tokens
- output_scores = decoder_out.output_scores
- attn = decoder_out.attn
-
- bsz = output_tokens.size(0)
- if max_ratio is None:
- max_lens = output_tokens.new().fill_(255)
- else:
- if not encoder_outs[0]["encoder_padding_mask"]:
- src_lens = (
- encoder_outs[0]["encoder_out"][0].new(bsz)
- .fill_(encoder_outs[0]["encoder_out"][0].size(1))
- )
- else:
- src_lens = (~encoder_outs[0]["encoder_padding_mask"][0]).sum(1)
- max_lens = (src_lens * max_ratio).clamp(min=10).long()
-
- # delete words
- # do not delete tokens if the sequence is only <s> </s> (bos/eos)
- can_del_word = output_tokens.ne(self.pad).sum(1) > 2
- if can_del_word.sum() != 0: # only run deletion when at least one sequence has deletable tokens
- output_tokens, output_scores, attn = self.forward_word_del(
- encoder_outs,
- output_tokens,
- output_scores,
- attn,
- can_del_word,
- )
-
- # insert placeholders
- can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens
- if can_ins_mask.sum() != 0:
- output_tokens, output_scores = self.forward_mask_ins(
- encoder_outs,
- output_tokens,
- output_scores,
- can_ins_mask,
- eos_penalty,
- max_lens,
- )
-
- # insert words
- can_ins_word = output_tokens.eq(self.unk).sum(1) > 0
- if can_ins_word.sum() != 0:
- output_tokens, output_scores, attn = self.forward_word_ins(
- encoder_outs,
- output_tokens,
- output_scores,
- attn,
- can_ins_word,
- )
-
- # delete some unnecessary paddings
- cut_off = output_tokens.ne(self.pad).sum(1).max()
- output_tokens = output_tokens[:, :cut_off]
- output_scores = output_scores[:, :cut_off]
- attn = None if attn is None else attn[:, :cut_off, :]
- return decoder_out._replace(
- output_tokens=output_tokens,
- output_scores=output_scores,
- attn=attn,
- history=None,
- )
-
- def forward_word_del(
- self, encoder_outs, output_tokens, output_scores, attn, can_del_word
- ):
- word_del_score_avg = []
- word_del_attn_avg = []
- for model, encoder_out in zip(self.models, encoder_outs):
- word_del_out, word_del_attn = model.decoder.forward_word_del(
- _skip(output_tokens, can_del_word),
- _skip_encoder_out(model.encoder, encoder_out, can_del_word),
- )
- word_del_score = F.log_softmax(word_del_out, 2)
- word_del_score_avg.append(word_del_score)
- word_del_attn_avg.append(word_del_attn)
- word_del_score_avg = torch.logsumexp(
- torch.stack(word_del_score_avg, dim=0), dim=0
- ) - math.log(len(self.models))
- word_del_pred = word_del_score_avg.max(-1)[1].bool()
- if word_del_attn_avg[0] is not None:
- word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0) / len(self.models)
- else:
- word_del_attn_avg = None
-
- _tokens, _scores, _attn = _apply_del_words(
- output_tokens[can_del_word],
- output_scores[can_del_word],
- word_del_attn_avg,
- word_del_pred,
- self.pad,
- self.bos,
- self.eos,
- )
- output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad)
- output_scores = _fill(output_scores, can_del_word, _scores, 0)
- attn = _fill(attn, can_del_word, _attn, 0.0)
- return output_tokens, output_scores, attn
-
- def forward_mask_ins(
- self,
- encoder_outs,
- output_tokens,
- output_scores,
- can_ins_mask,
- eos_penalty,
- max_lens,
- ):
- mask_ins_score_avg = []
- for model, encoder_out in zip(self.models, encoder_outs):
- mask_ins_out, _ = model.decoder.forward_mask_ins(
- _skip(output_tokens, can_ins_mask),
- _skip_encoder_out(model.encoder, encoder_out, can_ins_mask),
- )
- mask_ins_score = F.log_softmax(mask_ins_out, 2)
- if eos_penalty > 0.0:
- mask_ins_score[:, :, 0] -= eos_penalty
- mask_ins_score_avg.append(mask_ins_score)
- mask_ins_score_avg = torch.logsumexp(
- torch.stack(mask_ins_score_avg, dim=0), dim=0
- ) - math.log(len(self.models))
- mask_ins_pred = mask_ins_score_avg.max(-1)[1]
- mask_ins_pred = torch.min(
- mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred)
- )
- _tokens, _scores = _apply_ins_masks(
- output_tokens[can_ins_mask],
- output_scores[can_ins_mask],
- mask_ins_pred,
- self.pad,
- self.unk,
- self.eos,
- )
- output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)
- output_scores = _fill(output_scores, can_ins_mask, _scores, 0)
- return output_tokens, output_scores
-
- def forward_word_ins(
- self, encoder_outs, output_tokens, output_scores, attn, can_ins_word
- ):
- word_ins_score_avg = []
- word_ins_attn_avg = []
- for model, encoder_out in zip(self.models, encoder_outs):
- word_ins_out, word_ins_attn = model.decoder.forward_word_ins(
- _skip(output_tokens, can_ins_word),
- _skip_encoder_out(model.encoder, encoder_out, can_ins_word),
- )
- word_ins_score = F.log_softmax(word_ins_out, 2)
- word_ins_score_avg.append(word_ins_score)
- word_ins_attn_avg.append(word_ins_attn)
- word_ins_score_avg = torch.logsumexp(
- torch.stack(word_ins_score_avg, dim=0), dim=0
- ) - math.log(len(self.models))
- if word_ins_attn_avg[0] is not None:
- word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0) / len(self.models)
- else:
- word_ins_attn_avg = None
- word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1)
-
- _tokens, _scores = _apply_ins_words(
- output_tokens[can_ins_word],
- output_scores[can_ins_word],
- word_ins_pred,
- word_ins_score_max,
- self.unk,
- )
-
- output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)
- output_scores = _fill(output_scores, can_ins_word, _scores, 0)
- attn = _fill(attn, can_ins_word, word_ins_attn, 0.0)
- return output_tokens, output_scores, attn
-
- def initialize_output_tokens(self, encoder_outs, src_tokens):
- # LevT doesn't do length prediction.
- return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens)
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/speech_to_text/modules/emformer.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/speech_to_text/modules/emformer.py
deleted file mode 100644
index 6ef76bd012ba40b0395fec2ca9ae9e9c136ffe40..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/speech_to_text/modules/emformer.py
+++ /dev/null
@@ -1,1837 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-
-
-import math
-import re
-from functools import partial
-from typing import List, Optional, Tuple
-
-import torch
-import torch.nn as nn
-from fairseq.models import (
- FairseqEncoder,
-)
-from fairseq.models.speech_to_text.utils import (
- NoOp,
- lengths_to_padding_mask,
- segments_to_sequence,
-)
-from fairseq.models.speech_to_text.utils import (
- attention_suppression,
- layer_norm_backward_hook,
-)
-from torch import Tensor, device as Device
-from torch.quantization.qconfig import (
- default_dynamic_qconfig,
- per_channel_dynamic_qconfig,
-)
-
-
-class RelativePositionEmbedding(nn.Module):
- """
- Implementation according to https://arxiv.org/abs/1803.02155
- """
-
- def __init__(self, head_dim, max_position, norm_init=True):
- super().__init__()
- self.head_dim = head_dim
- self.max_position = max_position
- self.embeddings = nn.Parameter(torch.Tensor(max_position * 2 + 1, head_dim))
- if norm_init:
- nn.init.xavier_normal_(self.embeddings)
- else:
- nn.init.xavier_uniform_(self.embeddings)
-
- def forward(self, input: Tensor):
- output = nn.functional.embedding(input.long(), self.embeddings)
- return output
-
-
-class Fp32LayerNorm(nn.Module):
- def __init__(
- self,
- input_dim,
- clamp_grad=True,
- max_grad_value=256,
- eps=1e-5,
- elementwise_affine=True,
- ):
- super().__init__()
- self.torch_module = torch.nn.LayerNorm(
- input_dim, eps=eps, elementwise_affine=elementwise_affine
- )
- if clamp_grad:
- hook = partial(layer_norm_backward_hook, clamp_value=max_grad_value)
- self.torch_module.register_backward_hook(hook)
-
- def forward(self, input):
- output = torch.nn.functional.layer_norm(
- input.float(),
- self.torch_module.normalized_shape,
- self.torch_module.weight.float()
- if self.torch_module.weight is not None
- else None,
- self.torch_module.bias.float()
- if self.torch_module.bias is not None
- else None,
- self.torch_module.eps,
- ).type_as(input)
- return output
-
-
-# ------------------------------------------------------------------------------
-# PositionwiseFF
-# ------------------------------------------------------------------------------
-
-
-class PositionwiseFF(nn.Module):
- """
- FFN layer in transformer.
-
- Args:
- input_dim: input embedding dimension
- ffn_dim: FFN layer inner dimension
- dropout_on_fc1: dropout for first linear layer
- dropout_on_fc2: dropout for the second linear layer
- activation_fn: activation function used after first linear layer. \
- Only relu or gelu is supported.
-
- """
-
- def __init__(
- self, input_dim, ffn_dim, dropout_on_fc1, dropout_on_fc2, activation_fn
- ):
- super(PositionwiseFF, self).__init__()
-
- self.input_dim = input_dim
- self.ffn_dim = ffn_dim
- if activation_fn == "relu":
- ac = nn.ReLU()
- elif activation_fn == "gelu":
- ac = nn.GELU()
- else:
- raise ValueError("Unsupported activation_fn = ({})".format(activation_fn))
-
- # fc1 -> ac -> dropout -> fc2 -> dropout
- self.module = nn.Sequential(
- nn.Linear(input_dim, ffn_dim),
- ac,
- nn.Dropout(dropout_on_fc1),
- nn.Linear(ffn_dim, input_dim),
- nn.Dropout(dropout_on_fc2),
- )
-
- self.layer_norm = Fp32LayerNorm(input_dim)
-
- def forward(self, input):
- module_out = self.module(self.layer_norm(input))
- output = module_out + input
-
- return output
-
- def quantize_(self, params=None):
- if params and "per_channel" in params and params["per_channel"]:
- qconfig = per_channel_dynamic_qconfig
- else:
- qconfig = default_dynamic_qconfig
- torch.quantization.quantize_dynamic(
- self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
- )
- return self
-
-
-# ------------------------------------------------------------------------------
-# SummarizationLayer
-# ------------------------------------------------------------------------------
-
-
-class SummarizationLayer(nn.Module):
- def __init__(self, method, segment_size, embedding_dim):
- super(SummarizationLayer, self).__init__()
- self.segment_size = segment_size
- self.embedding_dim = embedding_dim
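- # "method" selects the summarizer: "mean", "max", "linear", or a 2-layer
- # MLP via the pattern "nonlinear(<act>,<hidden_dim>)", e.g. "nonlinear(relu,128)".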
- nonlin_match = re.match(r"nonlinear\((?P<act>[a-z]+),(?P<dim>[0-9]+)\)", method)
- self.method = method
- if method == "mean":
- self.module = nn.AvgPool1d(
- kernel_size=segment_size,
- stride=segment_size,
- ceil_mode=True,
- )
- elif method == "max":
- self.module = nn.MaxPool1d(
- kernel_size=segment_size,
- stride=segment_size,
- ceil_mode=True,
- )
- elif method == "linear":
- self.module = nn.Linear(segment_size, 1)
- elif nonlin_match:
- nonlin_args = nonlin_match.groupdict()
- act_type = nonlin_args["act"]
- hid_dim = int(nonlin_args["dim"])
- if act_type == "relu":
- act = nn.ReLU()
- elif act_type == "gelu":
- act = nn.GELU()
- else:
- raise ValueError("Unsupported activation_fn = ({})".format(act_type))
- self.module = nn.Sequential(
- nn.Linear(segment_size, hid_dim),
- act,
- nn.Linear(hid_dim, 1),
- )
- else:
- raise ValueError("Unsupported summarization method = ({})".format(method))
-
- def forward(self, input):
- # T, B, D -> B, D, T
- input = input.permute(1, 2, 0)
-
- if self.method == "mean" or self.method == "max":
- output = self.module(input)
- output = output.permute(2, 0, 1)
- return output
-
- full_seg_length = input.size(2) // self.segment_size * self.segment_size
- if full_seg_length > 0:
- # at least one seg is full
- B = input.size(0)
- D = input.size(1)
- input_todo = (
- input[:, :, :full_seg_length]
- .contiguous()
- .view(B, -1, self.segment_size)
- )
- output = self.module(input_todo)
- output = output.view(B, D, -1)
- else:
- output = input.new_zeros(input.size(0), input.size(1), 0)
- left = input.size(2) - full_seg_length
- if left > 0:
- # when last seg is not full, use zeros as last memory placeholder
- zeros = input.new_zeros(input.size(0), input.size(1), 1)
- output = torch.cat([output, zeros], dim=2)
- output = output.permute(2, 0, 1)
- return output
-
-
-# ------------------------------------------------------------------------------
-# NoSegAugmentedMemoryMultiheadAttentionBmm
-# ------------------------------------------------------------------------------
-
-
-class NoSegAugmentedMemoryMultiheadAttentionBmm(nn.Module):
- """
- Whole utterance augmented memory multihead attention using BMM.
-
- Different from the previous augmented memory multihead attention, where
- the utterance is chunked into segments, here we use an attention mask
- to achieve the same effect. The input embedding [right_context, utterance, summary]
- is a concatenation of right context, utterance and summary.
-
- The right context block is the concatenation of the right context for
- each segment: [right_context_0, right_context_1, ..., right_context_n].
- For example, if we have utterance = [v0, v1, v2, ..., v20], segment
- size 8, and right_context size 4, then the right context blocks are
- [v8, v9, v10, v11, v16, v17, v18, v19, 0, 0, 0, 0], where v8, v9, v10,
- and v11 are the right context for the first segment; v16, v17, v18 and v19
- are the right context for the second segment; and 0, 0, 0, 0 are the right
- context for the last segment.
-
- utterance corresponds to the input embedding sequence.
-
- summary is the concatenation of the averages of the segments: [summary_0,
- summary_1, ...].
-
- In augmented memory multihead attention, the query is [right_context,
- utterance, summary] and the key is [memory, right_context, utterance]. Different
- from AugmentedMemoryMultiheadAttentionBmm, the memory here is passed from the
- previous attention layer. For the first attention layer, the memory is the
- average of each segment.
-
- Memory is a concatenation of the memory from each segment in the previous
- attention layer. For example, if the current layer is i, then memory is
- [m_0, m_1, ..., m_n], where each m_k is the output from seg_k in layer i-1.
-
- args:
- input_dim: input embedding dimension
- num_heads: number of heads in multihead self-attention
- dropout: attention dropout
- std_scale: if std_scale is not None, weak attention suppression is
- turned on; for std_scale = 0.5, all attention weights smaller than
- mean + 0.5 * std are suppressed.
- scaled_init: whether to use scaled init for linear weight
- tanh_on_mem: whether to use tanh on memory output
- use_mem: whether to use memory. When max_memory_size is 0,
- no memory is used.
- layer_index: current self-attention layer index that is used in depth
- initialization
- max_relative_position: max relative position used in relative position
- embedding
- rpe_old_option: kept for compatibility with a previous model, which was trained
- with attention += attention + rpe. The correct equation
- should be attention = attention + rpe
-
- """
-
- def __init__(
- self,
- input_dim,
- num_heads,
- dropout=0.0,
- std_scale=None,
- scaled_init=False,
- tanh_on_mem=False,
- use_mem=True,
- mini_batches=False,
- negative_inf="-inf",
- layer_index=-1,
- max_relative_position=0,
- rpe_old_option=True,
- ):
- if input_dim % num_heads:
- raise ValueError(
- "input_dim ({}) must be divisible by num_heads ({})".format(
- input_dim, num_heads
- )
- )
-
- super().__init__()
-
- embed_dim = input_dim
- self.e2h_kv = torch.nn.Linear(input_dim, 2 * input_dim, bias=True)
- self.e2h_q = torch.nn.Linear(input_dim, input_dim, bias=True)
- self.rpe_old_option = rpe_old_option
- if max_relative_position > 0:
- self.use_rpe = True
- self.rpe_k = RelativePositionEmbedding(
- head_dim=input_dim // num_heads,
- max_position=max_relative_position,
- )
- self.rpe_v = RelativePositionEmbedding(
- head_dim=input_dim // num_heads,
- max_position=max_relative_position,
- )
- else:
- self.use_rpe = False
- self.rpe_k = None
- self.rpe_v = None
- if scaled_init:
- if layer_index == -1:
- gain = 1.0 / math.sqrt(2)
- else:
- # https://arxiv.org/abs/2005.09684 depthwise initialization
- # stabilizes the training greatly. Use depthwise initialization
- # in place of an incremental loss.
- gain = 1.0 / math.sqrt(layer_index + 1)
- torch.nn.init.xavier_uniform_(self.e2h_kv.weight, gain=gain)
- torch.nn.init.xavier_uniform_(self.e2h_q.weight, gain=gain)
-
- self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True)
-
- self.embed_dim = embed_dim
- self.num_heads = num_heads
- self.dropout = dropout
-
- self.head_dim = embed_dim // num_heads
- self.scaling = self.head_dim ** -0.5
-
- self.std_scale = std_scale
- self.use_mem = use_mem
- self.mini_batches = mini_batches
- self.negative_inf = negative_inf
-
- if tanh_on_mem:
- self.squash_mem = torch.tanh
- self.nonlinear_squash_mem = True
- else:
- self.squash_mem = NoOp()
- self.nonlinear_squash_mem = False
-
- def prepare_qkv(
- self,
- input: Tensor,
- mems: Tensor,
- lengths: Tensor,
- summary_length: int,
- lc_length: int,
- ):
- # T: right_context length + utterance_length + summary_length
- T, B, D = input.shape
- mem_length = mems.size(0)
- utterance_length = torch.max(lengths)
-
- right_context_blocks_length = T - utterance_length - summary_length
- rc_block = input[:right_context_blocks_length, :, :]
- utterance_block = input[right_context_blocks_length : T - summary_length, :, :]
-
- if B == 1:
- padding_mask = None
- else:
- klengths = lengths + mem_length + right_context_blocks_length + lc_length
- padding_mask = lengths_to_padding_mask(lengths=klengths)
-
- mem_rc_input = torch.cat([mems, rc_block, utterance_block], dim=0)
-
- # In training lc_length = 0
- key_length = mem_rc_input.size(0) + lc_length
- rc_input_sum = input
- q = self.e2h_q(rc_input_sum)
- kv = self.e2h_kv(mem_rc_input)
- k, v = kv.chunk(chunks=2, dim=2)
- result_qkv = (q, k, v)
- input_shape = (T, B, D)
- result_lengths_info = (
- mem_length,
- utterance_length,
- right_context_blocks_length,
- key_length,
- )
- if padding_mask is not None:
- assert padding_mask.size(0) == B
- assert padding_mask.size(1) == key_length
-
- return result_qkv, input_shape, result_lengths_info, padding_mask
-
- def prepare_attention_weights(
- self,
- q: Tensor,
- new_k: Tensor,
- new_v: Tensor,
- input_shape: Tuple[int, int, int],
- rpe: Optional[Tensor],
- ) -> Tuple[Tensor, Tensor, Tensor]:
- T, B, D = input_shape
- q = (
- q.contiguous().view(-1, B * self.num_heads, self.head_dim).transpose(0, 1)
- * self.scaling
- )
-
- k = (
- new_k.contiguous()
- .view(-1, B * self.num_heads, self.head_dim)
- .transpose(0, 1)
- )
-
- v = (
- new_v.contiguous()
- .view(-1, B * self.num_heads, self.head_dim)
- .transpose(0, 1)
- )
-
- attention_weights = torch.bmm(q, k.transpose(1, 2))
- if self.use_rpe and rpe is not None and self.rpe_v is not None:
- r_k = self.rpe_k(rpe)
- # [q, B*h, d] * [q, k, d] -> [B*h, q, k]
- attention_weights_rpe = torch.matmul(
- q.transpose(0, 1), r_k.transpose(1, 2)
- ).transpose(0, 1)
- attention_weights = attention_weights + attention_weights_rpe
- attention_weights_float = attention_weights.float()
-
- return attention_weights, attention_weights_float, v
-
- def prepare_attention_output(
- self,
- attention_weights: Tensor,
- attention_weights_float: Tensor,
- v: Tensor,
- input_shape: Tuple[int, int, int],
- key_length: int,
- padding_mask: Optional[Tensor],
- rpe: Optional[Tensor],
- ) -> Tensor:
- T, B, D = input_shape
- if padding_mask is not None:
- attention_weights_float = attention_weights_float.view(
- B, self.num_heads, T, key_length
- )
- attention_weights_float = attention_weights_float.masked_fill(
- padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
- )
- attention_weights_float = attention_weights_float.view(
- B * self.num_heads, T, key_length
- )
-
- if self.std_scale is not None:
- attention_weights_float = attention_suppression(
- attention_weights_float, self.std_scale
- )
-
- attention_weights_float = torch.nn.functional.softmax(
- attention_weights_float, dim=-1
- )
- attention_weights = attention_weights_float.type_as(attention_weights)
-
- attention_probs = torch.nn.functional.dropout(
- attention_weights, p=self.dropout, training=self.training
- )
-
- # [B*n_head, T, key_length] x [B*n_head, key_length, d_head]
- # -> [B*n_head, T, d_head]
- attention = torch.bmm(attention_probs, v)
- if self.use_rpe and rpe is not None and self.rpe_v is not None:
- r_v = self.rpe_v(rpe)
- attention_rpe = torch.matmul(
- attention_probs.transpose(0, 1), r_v
- ).transpose(0, 1)
-
- if self.rpe_old_option:
- attention += attention + attention_rpe
- else:
- attention = attention + attention_rpe
-
- assert list(attention.shape) == [B * self.num_heads, T, self.head_dim]
-
- attention = attention.transpose(0, 1).contiguous().view(T, B, self.embed_dim)
-
- rc_output_memory = self.out_proj(attention)
- return rc_output_memory
-
- @torch.jit.unused
- def forward(
- self,
- input: Tensor,
- lengths: Tensor,
- mems: Tensor,
- attention_mask: Tensor,
- pre_mems: Optional[Tensor] = None,
- left_context_key: Optional[Tensor] = None,
- left_context_val: Optional[Tensor] = None,
- rpe: Optional[Tensor] = None,
- ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
- """
- forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in training.
-
- args:
- input: formed in the following way
- [right_context_0, right_contex_1, ..., seg_0, seg_1,
- ..., summary_0, summary_1,..]
- lengths: the length of query which is [seg_0, seg_1, ....]
- mems: [mem_0, mem_1, ...].
- attention_mask: attention mask for query = [right_context, query, summary]
- key = [mem, right_context, query]. This is only used for training.
-
- """
- if self.use_mem:
- mem_length = mems.size(0)
- summary_length = mem_length + 1
- if pre_mems is not None:
- mems = torch.cat([pre_mems, mems], dim=0)
- else:
- mem_length = 0
- summary_length = 0
-
- # In training, lc_length = 0
- if left_context_key is not None:
- lc_length = left_context_key.size(0)
- else:
- lc_length = 0
- results = self.prepare_qkv(
- input=input,
- mems=mems,
- lengths=lengths,
- summary_length=summary_length,
- lc_length=lc_length,
- )
- result_qkv, input_shape, result_lengths_info, padding_mask = results
- q, k, v = result_qkv
- (
- mem_length,
- utterance_length,
- right_context_blocks_length,
- key_length,
- ) = result_lengths_info
-
- if left_context_key is not None:
- # add the cache key and value
- new_k = torch.cat(
- [
- k[: mem_length + right_context_blocks_length, :, :],
- left_context_key,
- k[-utterance_length:, :, :],
- ],
- dim=0,
- )
- new_v = torch.cat(
- [
- v[: mem_length + right_context_blocks_length, :, :],
- left_context_val,
- v[-utterance_length:, :, :],
- ],
- dim=0,
- )
- next_k = new_k[mem_length + right_context_blocks_length :, :, :]
- next_v = new_v[mem_length + right_context_blocks_length :, :, :]
- else:
- new_k = k
- new_v = v
- next_k = None
- next_v = None
-
- attention_weights, attention_weights_float, v = self.prepare_attention_weights(
- q=q,
- new_k=new_k,
- new_v=new_v,
- input_shape=input_shape,
- rpe=rpe,
- )
-
- # mask attention
- attention_mask = attention_mask.unsqueeze(0)
- attention_weights_float = attention_weights_float.masked_fill(
- attention_mask, float(self.negative_inf)
- )
-
- rc_output_memory = self.prepare_attention_output(
- attention_weights=attention_weights,
- attention_weights_float=attention_weights_float,
- v=v,
- input_shape=input_shape,
- key_length=key_length,
- padding_mask=padding_mask,
- rpe=rpe,
- )
-
- if self.use_mem:
- # next_m length equals summary length - 1
- # last memory is ignored
- if self.mini_batches:
- next_m = rc_output_memory[-summary_length:]
- else:
- next_m = rc_output_memory[-summary_length:-1]
-
- next_m = self.squash_mem(next_m)
- # rc and output
- rc_output = rc_output_memory[:-summary_length]
- if not self.nonlinear_squash_mem:
- next_m = torch.clamp(next_m, min=-10, max=10)
- else:
- next_m = mems
- rc_output = rc_output_memory
-
- return rc_output, next_m, next_k, next_v
-
- @torch.jit.export
- def forward_jit(
- self,
- input: Tensor,
- lengths: Tensor,
- mems: Tensor,
- left_context_key: Tensor,
- left_context_val: Tensor,
- rpe: Optional[Tensor],
- ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
- """
- forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in decoding.
-
- args:
- input: formed in the following way
- [right_context_0, right_context_1, ..., seg_0, seg_1,
- ..., summary_0, summary_1,..]
- lengths: the length of query which is [seg_0, seg_1, ....]
- mems: [mem_0, mem_1, ...].
- left_context_key: left context for the key part. This is only used for online
- decoding. In training, this is an empty tensor.
- left_context_val: left context for the value part. This is only used for online
- decoding. In training, this is an empty tensor.
-
- """
- lc_length = left_context_key.size(0)
-
- # In decoding, summary_length = 1 or 0
- if self.use_mem:
- summary_length = 1
- else:
- summary_length = 0
-
- results = self.prepare_qkv(
- input=input,
- mems=mems,
- lengths=lengths,
- summary_length=summary_length,
- lc_length=lc_length,
- )
- result_qkv, input_shape, result_lengths_info, padding_mask = results
- q, k, v = result_qkv
- (
- mem_length,
- utterance_length,
- right_context_blocks_length,
- key_length,
- ) = result_lengths_info
-
- # add the cache key and value
- new_k = torch.cat(
- [
- k[: mem_length + right_context_blocks_length, :, :],
- left_context_key,
- k[-utterance_length:, :, :],
- ],
- dim=0,
- )
- new_v = torch.cat(
- [
- v[: mem_length + right_context_blocks_length, :, :],
- left_context_val,
- v[-utterance_length:, :, :],
- ],
- dim=0,
- )
- next_k = new_k[mem_length + right_context_blocks_length :, :, :]
- next_v = new_v[mem_length + right_context_blocks_length :, :, :]
-
- attention_weights, attention_weights_float, v = self.prepare_attention_weights(
- q=q,
- new_k=new_k,
- new_v=new_v,
- input_shape=input_shape,
- rpe=rpe,
- )
- # In online decoding, we don't have an attention mask, but we still need
- # to disable the attention from summary query to memory
- attention_weights_float[:, -1, :mem_length] = float(self.negative_inf)
- rc_output_memory = self.prepare_attention_output(
- attention_weights=attention_weights,
- attention_weights_float=attention_weights_float,
- v=v,
- input_shape=input_shape,
- key_length=key_length,
- padding_mask=padding_mask,
- rpe=rpe,
- )
-
- # In decoding, summary length is 1
- if self.use_mem:
- next_m = rc_output_memory[-1:]
- next_m = self.squash_mem(next_m)
- # rc and output
- rc_output = rc_output_memory[:-1]
- if not self.nonlinear_squash_mem:
- next_m = torch.clamp(next_m, min=-10, max=10)
- else:
- rc_output = rc_output_memory
- # empty tensor as input mems
- next_m = mems
-
- return rc_output, next_m, next_k, next_v
-
- def quantize_(self, params=None):
- if params and "per_channel" in params and params["per_channel"]:
- qconfig = per_channel_dynamic_qconfig
- else:
- qconfig = default_dynamic_qconfig
- torch.quantization.quantize_dynamic(
- self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
- )
- return self
-
-
-class NoSegAugmentedMemoryTransformer(nn.Module):
- """
- Whole utterance augmented memory transformer.
-
- This is not a pyspeech nn layer. It is used as a module in a master layer where
- multiple transformers are used.
- """
-
- def __init__(
- self,
- input_dim,
- num_heads,
- ffn_dim,
- dropout_in_attn=0.0,
- dropout_on_attn=None,
- dropout_on_fc1=None,
- dropout_on_fc2=None,
- activation_fn="relu",
- tanh_on_mem=False,
- std_scale=None,
- scaled_init=False,
- segment_size=128,
- use_mem=True,
- mini_batches=False,
- negative_inf="-inf",
- layer_index=-1,
- summarization_method="mean",
- max_relative_position=0,
- rpe_old_option=True,
- ):
- super(NoSegAugmentedMemoryTransformer, self).__init__()
-
- self.attention = NoSegAugmentedMemoryMultiheadAttentionBmm(
- input_dim=input_dim,
- num_heads=num_heads,
- dropout=dropout_in_attn,
- scaled_init=scaled_init,
- tanh_on_mem=tanh_on_mem,
- std_scale=std_scale,
- use_mem=use_mem,
- mini_batches=mini_batches,
- negative_inf=negative_inf,
- layer_index=layer_index,
- max_relative_position=max_relative_position,
- )
- self.dropout = nn.Dropout(dropout_on_attn)
- self.pos_ff = PositionwiseFF(
- input_dim=input_dim,
- ffn_dim=ffn_dim,
- dropout_on_fc1=dropout_on_fc1,
- dropout_on_fc2=dropout_on_fc2,
- activation_fn=activation_fn,
- )
- self.layer_norm_pre = Fp32LayerNorm(input_dim)
- self.layer_norm = Fp32LayerNorm(input_dim)
- self.segment_size = segment_size
- self.use_mem = use_mem
-
- self.memory_op = SummarizationLayer(
- summarization_method, segment_size, input_dim
- )
-
- def set_mini_batches(self, mini_batches):
- self.attention.mini_batches = mini_batches
-
- def gen_summary_queries(self, input):
- sum_input = self.memory_op(input)
- return sum_input
-
- def pre_attention_ops(self, input, right_context_blocks):
- rc_length = right_context_blocks.size(0)
- input_length = input.size(0)
-
- rc_and_input = torch.cat([right_context_blocks, input], dim=0)
- residual_input = rc_and_input
- rc_and_input = self.layer_norm_pre(rc_and_input)
-
- query_input = rc_and_input[-input_length:, :, :]
- return rc_length, input_length, residual_input, query_input, rc_and_input
-
- def after_attention_ops(self, attention_output, residual_input):
- output = self.dropout(attention_output)
- output = output + residual_input
- output = self.pos_ff(output)
- output = self.layer_norm(output)
- return output
-
- @torch.jit.export
- def forward_jit(
- self,
- input: Tensor,
- lengths: Tensor,
- mems: Tensor,
- left_context_key: Tensor,
- left_context_val: Tensor,
- right_context_blocks: Tensor,
- rpe: Optional[Tensor],
- ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
-
- results = self.pre_attention_ops(input, right_context_blocks)
- rc_length, input_length, residual_input, query_input, rc_and_input = results
-
- # In online decoding, the summary query size is always 1 or 0
- if self.use_mem:
- summary_query = self.gen_summary_queries(query_input)
- summary_query = summary_query[0:1, :, :]
- rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0)
- else:
- rc_qu_su = rc_and_input
-
- rc_output, next_m, next_k, next_v = self.attention.forward_jit(
- input=rc_qu_su,
- lengths=lengths,
- mems=mems,
- left_context_key=left_context_key,
- left_context_val=left_context_val,
- rpe=rpe,
- )
- rc_output = self.after_attention_ops(rc_output, residual_input)
- results = (
- rc_output[-input_length:, :, :],
- next_m,
- rc_output[0:rc_length, :, :],
- next_k,
- next_v,
- )
- return results
-
- @torch.jit.unused
- def forward(
- self,
- input,
- lengths,
- mems,
- right_context_blocks,
- attention_mask,
- pre_mems,
- left_context_key,
- left_context_val,
- rpe,
- ):
-
- results = self.pre_attention_ops(input, right_context_blocks)
- rc_length, input_length, residual_input, query_input, rc_and_input = results
- if self.use_mem:
- summary_query = self.gen_summary_queries(query_input)
- rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0)
- else:
- rc_qu_su = rc_and_input
-
- rc_output, next_m, next_k, next_v = self.attention(
- input=rc_qu_su,
- lengths=lengths,
- mems=mems,
- attention_mask=attention_mask,
- pre_mems=pre_mems,
- left_context_key=left_context_key,
- left_context_val=left_context_val,
- rpe=rpe,
- )
-
- # [TODO] Note memory did not go through pos_ff. What happens if we pass
- # memory through the pos_ff as well?
- rc_output = self.after_attention_ops(rc_output, residual_input)
- results = (
- rc_output[-input_length:, :, :],
- next_m,
- rc_output[0:rc_length, :, :],
- next_k,
- next_v,
- )
-
- return results
-
-
-class NoSegAugmentedMemoryTransformerEncoderLayer(FairseqEncoder):
- """
- Whole utterance augmented memory transformer encoder layer. This is a master layer
- where we can define multiple augmented memory transformers. There are two reasons
- to set up the master layer.
- 1. We only need to define the attention mask once. All the layers in the master
- layer share the same mask.
- 2. The pyspeech nn layer has a special input and output format. Defining one master
- layer makes it easier to pass memory between the different layers inside it.
-
- args:
- input_dim: input embedding dimension
- num_heads: number of heads in multihead self-attention
- ffn_dim: ffn dimension in FFN layer
- num_layers: number of augmented memory transformer layers
- dropout_in_attn: dropout used in multi-head self-attention
- dropout_on_attn: dropout used for the output from the multihead self-attention
- dropout_on_fc1: dropout used in FFN layer for the first linear layer
- dropout_on_fc2: dropout used in FFN layer for the second linear layer
- segment_size: segment size for each segment
- context_config: (left_context_size, right_context_size) defines the surrounding context size
- for each segment
- max_memory_size: maximum memory size used for each segment
- scaled_init: whether to use scaled init for weight initialization in the attention layer
- std_scale: if std_scale is not None, weak attention suppression is
- turned on; for std_scale = 0.5, all attention weights smaller than
- mean + 0.5 * std are suppressed.
- activation_fn: activation function used in the FFN layer. [ReLU, GELU] supported
- tanh_on_mem: whether to use tanh on memory
- mini_batches: use mini-batch training
- negative_inf: the negative infinity value used in attention masking. default is "-inf".
- For some situations, e.g. LM, it is better to use "-1e8" to avoid NaN issues.
- summarization_method: method to generate the segment summarization embedding
- max_relative_position: max relative position for relative position embedding
- rpe_old_option: kept for compatibility with a previous model, which was trained
- with attention += attention + rpe. The correct equation
- should be attention = attention + rpe
- [TODO]: remove the rpe_old_option by the end of 2021 Q1.
-
- """
-
- def __init__(
- self,
- input_dim,
- num_heads,
- ffn_dim,
- num_layers=1,
- dropout_in_attn=0.0,
- dropout_on_attn=0.0,
- dropout_on_fc1=0.0,
- dropout_on_fc2=0.0,
- segment_size=128,
- context_config=(0, 0),
- max_memory_size=0,
- scaled_init=True,
- std_scale=None,
- activation_fn="relu",
- tanh_on_mem=False,
- mini_batches=False,
- negative_inf="-inf",
- deep_init=True,
- summarization_method="mean",
- max_relative_position=0,
- rpe_old_option=True,
- ):
- super().__init__(None)
- if input_dim % num_heads:
- raise ValueError(
- "input_dim ({}) must be divisible by num_heads ({})".format(
- input_dim, num_heads
- )
- )
-
- # We used to support a growing memory size. However, it causes
- # cross-stream batching failures, so we now require an exact max memory size.
- if max_memory_size < 0:
- raise ValueError("max_memory_size must be >= 0")
-
- # Only assign right_context. In decoding, left context will be cached.
- # No need to let the online decoder re-assign the left context
- self.left_context, self.right_context = context_config
- self.segment_size = segment_size
- self.memory_dim = input_dim
- self.max_memory_size = max_memory_size
- self.mini_batches = mini_batches
- if self.max_memory_size != 0:
- self.use_mem = True
- else:
- self.use_mem = False
-
- self.memory_op = SummarizationLayer(
- summarization_method, segment_size, input_dim
- )
-
- self.layers = torch.nn.ModuleList()
- self.num_layers = num_layers
- self.max_relative_position = max_relative_position
- if self.max_relative_position > 0:
- self.use_rpe = True
- else:
- self.use_rpe = False
- for i in range(self.num_layers):
- if deep_init:
- layer_index = i
- else:
- layer_index = -1
-
- self.layers.append(
- NoSegAugmentedMemoryTransformer(
- num_heads=num_heads,
- input_dim=input_dim,
- ffn_dim=ffn_dim,
- dropout_in_attn=dropout_in_attn,
- dropout_on_attn=dropout_on_attn,
- dropout_on_fc1=dropout_on_fc1,
- dropout_on_fc2=dropout_on_fc2,
- segment_size=segment_size,
- std_scale=std_scale,
- activation_fn=activation_fn,
- tanh_on_mem=tanh_on_mem,
- scaled_init=scaled_init,
- use_mem=self.use_mem,
- mini_batches=mini_batches,
- negative_inf=negative_inf,
- layer_index=layer_index,
- summarization_method=summarization_method,
- max_relative_position=max_relative_position,
- rpe_old_option=rpe_old_option,
- )
- )
-
- def set_mini_batches(self, mini_batches):
- # handy function only used for unit test
- self.mini_batches = mini_batches
- for layer in self.layers:
- layer.set_mini_batches(mini_batches)
-
- def _get_relative_position(
- self,
- input: Tensor,
- max_relative_position: int,
- left_context_length: int,
- past_length: int,
- is_decoding: bool,
- ):
- # For training, we copy the right context to the start of the utterance
- # First dimension in distance is corresponding to query.
- # [right context, utterance, summary vector]
- # Second dimension in distance is corresponding to key.
- # [Memory bank, right context, utterance]
- # For the summary vector in the query part, the distance to
- # all other positions is 2*max_position. For the memory bank in the key,
- # the distance to all other positions is 0.
-
- T, B, D = input.shape
- num_segs = math.ceil((T - self.right_context) / self.segment_size)
-
- # utterance
- u_st = past_length * self.segment_size
- u_ed = u_st + T
- utterance_ranges = torch.arange(u_st, u_ed - self.right_context)
-
- # left context. Only in minibatch or decoding
- left_context_ranges = torch.arange(u_st - left_context_length, u_st)
-
- # Right context block
- # right context + utterance
- right_context_blocks = []
- for i in range(0, num_segs - 1):
- st = (i + 1) * self.segment_size + u_st
- ed = st + self.right_context
- assert ed < u_ed
- temp = torch.arange(st, ed)
- right_context_blocks.append(temp)
- right_context_blocks.append(torch.arange(u_ed - self.right_context, u_ed))
- right_context_ranges = torch.cat(right_context_blocks)
-
- if self.use_mem:
- # Memory bank
- # The position for memory -n, .., -1
- if is_decoding:
- memory_size = min(past_length, self.max_memory_size)
- else:
- memory_size = num_segs + past_length - 1
- memory_bank_ranges = torch.arange(
- -max_relative_position - 1, -max_relative_position - 1 - memory_size, -1
- )
-
- # summary vector
- # The position for the summary vector is set to T+max_relative_position+1.
- # After the clamping, the relative position is max_relative_position
- summary_pos_st = u_ed + max_relative_position + 1
- summary_vector_ranges = torch.arange(
- summary_pos_st, summary_pos_st + num_segs
- )
-
- key_ranges = torch.cat(
- [
- memory_bank_ranges,
- right_context_ranges,
- left_context_ranges,
- utterance_ranges,
- ]
- )
-
- query_ranges = torch.cat(
- [right_context_ranges, utterance_ranges, summary_vector_ranges]
- )
- else:
- key_ranges = torch.cat(
- [right_context_ranges, left_context_ranges, utterance_ranges]
- )
-
- query_ranges = torch.cat([right_context_ranges, utterance_ranges])
-
- distance = key_ranges[None, :] - query_ranges[:, None]
- distance_clamp = (
- torch.clamp(distance, -max_relative_position, max_relative_position)
- + max_relative_position
- )
- distance_clamp = distance_clamp.to(input.device).long().detach()
- return distance_clamp
-
- def _get_attention_mask(self, input, past_length=0, left_context_cache=0):
- # attention mask for each query contains three parts:
- # 1. memory part
- # 2. left_context + segment
- # 3. right_context_block
- # so for each segment and its corresponding right context block,
- # the attention matrix is formed by 9 parts:
- # [0, m, 0, 0, right_context, 0, 0, seg, 0]
- # [before memory, memory, after memory, before right context, right_context,
- # after right context, before seg, seg, after seg]
- #
- # Query is formed in the way as [right_context_blocks, utterance, summary]
- #
- # Note: putting m and right_context before the segment is convenient
- # for padding_mask operation.
- # Key lengths = m_length + right_context_block_length + lengths
- utterance_length, batch_size, _ = input.shape
- summary_length = math.ceil(utterance_length / self.segment_size)
- num_segs = summary_length
- rc_length = self.right_context * num_segs
- rc = self.right_context
- lc = self.left_context
-
- # using mini-batches, there is left context cache available for current
- # sequence.
- lcc = left_context_cache
-
- # if max_memory_size is 0, we have no memory and no summary
- # past_length is the memory carried over from the previous sequence
- if self.use_mem:
- mem_length = num_segs - 1 + past_length
- else:
- mem_length = 0
- rc_mask = []
- query_mask = []
- summary_mask = []
- for j in range(0, num_segs):
- ssize = min(self.segment_size, utterance_length - j * self.segment_size)
-
- rc_size = rc
- rc_mat = []
- q_mat = []
- s_mat = []
- m_start = max(j + past_length - self.max_memory_size, 0)
-
- # max_memory_size is 0, then we don't use memory
- if self.use_mem:
- # part 0: before memory
- rc_mat.append(input.new_zeros(rc_size, m_start))
- q_mat.append(input.new_zeros(ssize, m_start))
- s_mat.append(input.new_zeros(1, m_start))
-
- # part 1: memory
- col_1 = j + past_length - m_start
- rc_mat.append(torch.ones(rc_size, col_1, device=input.device))
- q_mat.append(torch.ones(ssize, col_1, device=input.device))
- # based on D22875746, disabling summary query attention
- # on memory is better for long-form utterances
- s_mat.append(input.new_zeros(1, col_1))
-
- # part 2: after memory
- col_2 = mem_length - (j + past_length)
- rc_mat.append(input.new_zeros(rc_size, col_2))
- q_mat.append(input.new_zeros(ssize, col_2))
- s_mat.append(input.new_zeros(1, col_2))
-
- # part 3: before right context
- rc_start = j * rc
- rc_mat.append(input.new_zeros(rc_size, rc_start))
- q_mat.append(input.new_zeros(ssize, rc_start))
- s_mat.append(input.new_zeros(1, rc_start))
-
- # part 4: right context
- rc_end = rc_start + rc
- col_4 = rc
- rc_mat.append(torch.ones(rc_size, col_4, device=input.device))
- q_mat.append(torch.ones(ssize, col_4, device=input.device))
- s_mat.append(torch.ones(1, col_4, device=input.device))
-
- # part 5: after right context
- col_5 = rc_length - rc_end
- rc_mat.append(input.new_zeros(rc_size, col_5))
- q_mat.append(input.new_zeros(ssize, col_5))
- s_mat.append(input.new_zeros(1, col_5))
-
- # part 6: before query segment
- seg_start = max(j * self.segment_size + lcc - lc, 0)
- rc_mat.append(input.new_zeros(rc_size, seg_start))
- q_mat.append(input.new_zeros(ssize, seg_start))
- s_mat.append(input.new_zeros(1, seg_start))
-
- # part 7: query segment
- # note: right context is put in right context block
- # here we only need to consider about left context
- seg_end = min((j + 1) * self.segment_size + lcc, utterance_length + lcc)
- col_7 = seg_end - seg_start
- rc_mat.append(torch.ones(rc_size, col_7, device=input.device))
- q_mat.append(torch.ones(ssize, col_7, device=input.device))
- s_mat.append(torch.ones(1, col_7, device=input.device))
-
- # part 8: after query segment
- col_8 = utterance_length + lcc - seg_end
- rc_mat.append(input.new_zeros(rc_size, col_8))
- q_mat.append(input.new_zeros(ssize, col_8))
- s_mat.append(input.new_zeros(1, col_8))
-
- rc_mask.append(torch.cat(rc_mat, dim=1))
- query_mask.append(torch.cat(q_mat, dim=1))
- summary_mask.append(torch.cat(s_mat, dim=1))
-
- # no memory, then we don't need summary either
- if self.use_mem:
- attention_mask = (
- 1
- - torch.cat(
- [
- torch.cat(rc_mask, dim=0),
- torch.cat(query_mask, dim=0),
- torch.cat(summary_mask, dim=0),
- ],
- dim=0,
- )
- ).to(torch.bool)
- else:
- attention_mask = (
- 1
- - torch.cat(
- [torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0)], dim=0
- )
- ).to(torch.bool)
-
- return attention_mask
-
- @torch.jit.export
- def init_state(
- self, batch_size: int, device: Optional[Device] = None
- ) -> List[Tensor]:
- empty_memory = torch.zeros(
- self.num_layers,
- self.max_memory_size,
- batch_size,
- self.memory_dim,
- device=device,
- )
- left_context_key = torch.zeros(
- self.num_layers,
- self.left_context,
- batch_size,
- self.memory_dim,
- device=device,
- )
- left_context_val = torch.zeros(
- self.num_layers,
- self.left_context,
- batch_size,
- self.memory_dim,
- device=device,
- )
- past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device)
-
- return [empty_memory, left_context_key, left_context_val, past_length]
-
- @torch.jit.export
- def batch_state(self, states: List[List[Tensor]]) -> List[Tensor]:
- if len(states) == 0:
- return []
- batched_m = []
- batched_lc_key = []
- batched_lc_val = []
- batched_past_length = []
- for state in states:
- if len(state) == 0:
- continue
- m, lc_key, lc_val, past_length = state
- batched_m.append(m)
- batched_lc_key.append(lc_key)
- batched_lc_val.append(lc_val)
- batched_past_length.append(past_length)
-
- if (
- (len(batched_m) == 0)
- or (len(batched_lc_key) == 0)
- or (len(batched_lc_val) == 0)
- or (len(batched_past_length) == 0)
- ):
- return [
- torch.tensor([]),
- torch.tensor([]),
- torch.tensor([]),
- torch.tensor([]),
- ]
-
- batched_m = torch.cat(batched_m, dim=2)
- batched_lc_key = torch.cat(batched_lc_key, dim=2)
- batched_lc_val = torch.cat(batched_lc_val, dim=2)
- batched_past_length = torch.cat(batched_past_length, dim=1)
- return [batched_m, batched_lc_key, batched_lc_val, batched_past_length]
-
- @torch.jit.export
- def reorder_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]:
- if len(state) == 0:
- return []
- m, lc_key, lc_val, past_length = state
- indices = indices.to(device=m.device)
- reord_m = torch.index_select(m, 2, indices)
- reord_lc_key = torch.index_select(lc_key, 2, indices)
- reord_lc_val = torch.index_select(lc_val, 2, indices)
- reord_past_length = torch.index_select(past_length, 1, indices)
- return [reord_m, reord_lc_key, reord_lc_val, reord_past_length]
-
- @torch.jit.export
- def reset_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]:
- m, lc_key, lc_val, past_length = state
- m = m.index_fill(dim=2, index=indices, value=0.0)
- lc_key = lc_key.index_fill(dim=2, index=indices, value=0.0)
- lc_val = lc_val.index_fill(dim=2, index=indices, value=0.0)
- past_length = past_length.index_fill(dim=1, index=indices, value=0)
-
- return [m, lc_key, lc_val, past_length]
-
- @torch.jit.export
- def state_size(self) -> int:
- return 4
-
- @torch.jit.export
- def batch_size_in_state(
- self, state: Optional[List[Tensor]], sloppy: bool = True
- ) -> Optional[int]:
- if state is None:
- return None
- return state[0].size(2)
-
- def gen_summary_queries(self, input):
- sum_input = self.memory_op(input)
- return sum_input
-
- def _gen_right_context_padded_input(self, input):
- # This function deals with input that is already
- # padded with right context (e.g. minibatch training)
- right_context_blocks = []
- T, B, D = input.shape
- num_segs = math.ceil((T - self.right_context) / self.segment_size)
- for i in range(0, num_segs - 1):
- st = (i + 1) * self.segment_size
- ed = st + self.right_context
- assert ed < T
- temp = input[st:ed, :, :]
- right_context_blocks.append(temp)
-
- # last segment right context is already available
- right_context_blocks.append(input[T - self.right_context :, :, :])
- return torch.cat(right_context_blocks, dim=0)
-
- def _gen_segs_right_context(self, input, lengths):
- segments = []
- T, B, D = input.size()
- nT = T - self.right_context
-
- # assume input is right context padded
- num_segs = math.ceil(nT / self.segment_size)
- # pad zeros to the utterance to make sure each
- # segment has the same right context. For the
- for i in range(0, num_segs - 1):
- st = i * self.segment_size
- ed = min(T, st + self.segment_size + self.right_context)
- temp = input[st:ed, :, :]
- rest_lengths = torch.clamp(
- lengths - self.segment_size, min=0, max=nT - (i + 1) * self.segment_size
- )
- segments.append((temp, lengths - rest_lengths + self.right_context))
- lengths = rest_lengths
-
- last_seg = input[st + self.segment_size :, :, :]
- segments.append((last_seg, rest_lengths + self.right_context))
-
- return segments
-
- @torch.jit.unused
- def forward(
- self, input: Tensor, padding_masks: Tensor, state: Optional[List[Tensor]] = None
- ) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
- # Xutai: originally the second argument is lengths.
- lengths = (~padding_masks).sum(dim=1).long()
- # mini batch training.
- if self.mini_batches:
- return self.forward_mini_batches(input, lengths, state)
-
- # regular full sequence training. Note: we assume the right context is provided
- # in the input.
- T, B, D = input.size()
- right_context_blocks = self._gen_right_context_padded_input(input)
-
- # generate the relative positional embedding
- if self.use_rpe:
- rpe = self._get_relative_position(
- input=input,
- max_relative_position=self.max_relative_position,
- left_context_length=0,
- past_length=0,
- is_decoding=False,
- )
- else:
- rpe = None
- input = input[: T - self.right_context, :, :]
-
- attention_mask = self._get_attention_mask(input)
-
- # the first layer uses each segment mean as memory;
- # the last segment average is ignored
- if self.use_mem:
- mems = self.gen_summary_queries(input)[:-1, :, :]
- else:
- mems = torch.zeros(0, input.size(1), input.size(2), device=input.device)
- mems = mems.type_as(input)
-
- output = input
- all_outputs = []
-
- for layer in self.layers:
- output, mems, right_context_blocks, _, _ = layer(
- input=output,
- lengths=lengths,
- attention_mask=attention_mask,
- mems=mems,
- right_context_blocks=right_context_blocks,
- pre_mems=None,
- left_context_key=None,
- left_context_val=None,
- rpe=rpe,
- )
- all_outputs.append(output)
- return output, padding_masks, [], all_outputs
-
- def forward_jit_mini_batch_init(
- self,
- seg: Tensor,
- state: Optional[List[Tensor]] = None,
- is_decoding: bool = False,
- ):
- # Prepare state. In whole sequence training, state is ignored.
- # For minibatch training, we need to prepare state
- if state is None:
- state = self.init_state(batch_size=seg.size(1), device=seg.device)
- if seg.dtype == torch.half:
- state = [state[0].half(), state[1].half(), state[2].half(), state[3]]
-
- if self.use_mem:
- # note: the input average is taken only over seg, not over the right context.
- # the first layer uses each segment mean as memory; the last
- # segment average is stored in the state
- full_mems = self.gen_summary_queries(seg)
- if is_decoding:
- mems = full_mems[0:1, :, :]
- state_mems = torch.cat([state[0][0], mems], dim=0)
- else:
- mems = full_mems[:-1, :, :]
- state_mems = torch.cat([state[0][0], full_mems], dim=0)
- else:
- mems = state[0][0]
- state_mems = mems
-
- # track the number of processed segments (i.e. memories);
- # every utterance in the same batch has the same past length
- past_length = state[3][0][0].item()
- past_left_context = min(past_length * self.segment_size, self.left_context)
- past_length = min(self.max_memory_size, past_length)
-
- return state, mems, state_mems, past_length, past_left_context
-
- def state_update_before(
- self, layer: int, state: List[Tensor], past_length: int, past_left_context: int
- ):
- pre_mems = state[0][layer][self.max_memory_size - past_length :, :, :]
- lc_key = state[1][layer][self.left_context - past_left_context :, :, :]
- lc_val = state[2][layer][self.left_context - past_left_context :, :, :]
- return pre_mems, lc_key, lc_val
-
- def state_update_after(
- self,
- layer: int,
- state: List[Tensor],
- mems: Tensor,
- next_key: Tensor,
- next_val: Tensor,
- mems_list: List[Tensor],
- lc_key_list: List[Tensor],
- lc_val_list: List[Tensor],
- ):
- # mems is used for next layer
- if layer < self.num_layers - 1:
- state_mems = torch.cat([state[0][layer + 1], mems], dim=0)
- mems_list.append(state_mems[-self.max_memory_size :, :, :])
-
- # when mems is passed to the next sequence, we need the last memory; when mems
- # is used for the next layer, we can ignore the last memory
- mems = mems[:-1, :, :]
-
- # note: state[1][i] and state[2][i] originally have length self.left_context
- new_k = torch.cat([state[1][layer], next_key], dim=0)
- new_v = torch.cat([state[2][layer], next_val], dim=0)
- lc_key_list.append(new_k[-self.left_context :, :, :])
- lc_val_list.append(new_v[-self.left_context :, :, :])
- return mems_list, lc_key_list, lc_val_list, mems
-
- def state_update_after_loop(
- self,
- state: List[Tensor],
- mems_list: List[Tensor],
- lc_key_list: List[Tensor],
- lc_val_list: List[Tensor],
- update_length: int,
- ):
- state[0] = torch.stack(mems_list, dim=0)
- state[1] = torch.stack(lc_key_list, dim=0)
- state[2] = torch.stack(lc_val_list, dim=0)
- state[3] = state[3] + update_length
- return state
-
- @torch.jit.unused
- def forward_mini_batches(
- self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
- ) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
- T, B, D = input.size()
-
- # input without right context
- seg = input[: T - self.right_context, :, :]
-
- # get right context blocks
- right_context_blocks = self._gen_right_context_padded_input(input)
-
- mems_list = []
- lc_key_list = []
- lc_val_list = []
- results = self.forward_jit_mini_batch_init(seg, state, False)
- state, mems, state_mems, past_length, past_left_context = results
-
- # relative position embedding
- if self.use_rpe:
- rpe = self._get_relative_position(
- input=input,
- max_relative_position=self.max_relative_position,
- left_context_length=past_left_context,
- past_length=past_length,
- is_decoding=False,
- )
- else:
- rpe = None
-
- # get the attention mask based on seg (not including the right context) and the
- # available left context
- attention_mask = self._get_attention_mask(seg, past_length, past_left_context)
- mems_list.append(state_mems[-self.max_memory_size :, :, :])
- output = seg
- i = 0
- all_outputs = []
- for layer in self.layers:
- # In order to make cross-stream batching work, the memory, left context key
- # and left context value in the state should always have the same shape.
- # We use the past length to track the number of processed segments. In this
- # way, we take out the essential memory, left context key and left
- # context value from the state. After finishing the forward pass for the
- # current segment, we add the new memory, left context key and left context
- # value into the state and trim out the oldest part to keep the shape consistent.
- pre_mems, lc_key, lc_val = self.state_update_before(
- i, state, past_length, past_left_context
- )
-
- output, mems, right_context_blocks, next_key, next_val = layer.forward(
- input=output,
- lengths=lengths,
- attention_mask=attention_mask,
- mems=mems,
- right_context_blocks=right_context_blocks,
- pre_mems=pre_mems,
- left_context_key=lc_key,
- left_context_val=lc_val,
- rpe=rpe,
- )
- all_outputs.append(output)
- mems_list, lc_key_list, lc_val_list, mems = self.state_update_after(
- layer=i,
- state=state,
- mems=mems,
- next_key=next_key,
- next_val=next_val,
- mems_list=mems_list,
- lc_key_list=lc_key_list,
- lc_val_list=lc_val_list,
- )
-
- i += 1
-
- # update state
- update_length = math.ceil((T - self.right_context) / self.segment_size)
- state = self.state_update_after_loop(
- state=state,
- mems_list=mems_list,
- lc_key_list=lc_key_list,
- lc_val_list=lc_val_list,
- update_length=update_length,
- )
-
- return output, lengths, state, all_outputs
-
- def forward_jit_test(
- self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
- ) -> Tuple[Tensor, Tensor, List[Tensor]]:
- """
- This simulates the sequence encoder forward_jit call. It is for unit-test purposes
- and is not used in training or decoding. Note: extra_right_context is set in
- the model. In the unit test, input = [utterance, right_context] and lengths =
- [utterance_length].
- args:
- input: input utterance
- lengths: utterance input length
- state: None here. input is whole utterance
- """
- # [TODO] sequence_to_segment has a bug in lengths.
- seg_src_tokens_lengths = self._gen_segs_right_context(input, lengths)
-
- seg_enc_tokens_lengths: List[Tuple[Tensor, Tensor]] = []
- state: Optional[List[Tensor]] = None
- for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths:
- seg_enc_tokens, seg_enc_lengths, state = self.forward_jit(
- input=seg_src_tokens, lengths=seg_src_lengths, state=state
- )
- seg_enc_tokens_lengths.append((seg_enc_tokens, seg_enc_lengths))
-
- enc_tokens, enc_lengths = segments_to_sequence(
- segments=seg_enc_tokens_lengths, time_axis=0
- )
-
- state = [] # returns trivial state
-
- return enc_tokens, enc_lengths, state
-
- @torch.jit.export
- def forward_jit(
- self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
- ) -> Tuple[Tensor, Tensor, List[Tensor]]:
- """
- Forward helper for online decoding.
-
- args:
- input: [seg, right_context]. We assume that in online decoding we
- always pad the right context to the preset right context size.
- For the last segment, the segment itself may be short, but the right
- context size is the same as for other segments
- lengths: utterance input length, i.e. the utterance segment length plus the
- right context size
- state: [memory, left_context_key, left_context_val]. To improve throughput,
- in addition to memory, we also cache key and value for left_context in
- multihead self-attention
- """
- # In online decoding, input = [segment, right_context]
- # Lengths = [segment_length, right_context_length]
- # so we need strip right context in output
- T, B, D = input.size()
- rc_str = T - self.right_context
- rc_end = T
- right_context_blocks = input[rc_str:rc_end, :, :]
- seg = input[:rc_str, :, :]
- lengths = torch.clamp(lengths - self.right_context, min=0)
- mems_list = []
- lc_key_list = []
- lc_val_list = []
-
- results = self.forward_jit_mini_batch_init(seg, state, True)
- state, mems, state_mems, past_length, past_left_context = results
-
- # relative position embedding
- if self.use_rpe:
- rpe = self._get_relative_position(
- input=input,
- max_relative_position=self.max_relative_position,
- left_context_length=past_left_context,
- past_length=past_length,
- is_decoding=True,
- )
- else:
- rpe = None
-
- # memory for first layer.
- mems_list.append(state_mems[-self.max_memory_size :, :, :])
- output = seg
- i = 0
- for layer in self.layers:
- # In order to make cross-stream batching work, the memory, left context key
- # and left context value in the state should always have the same shape.
- # We use the past length to track the number of processed segments. In this
- # way, we take out the essential memory, left context key and left
- # context value from the state. After finishing the forward pass for the
- # current segment, we add the new memory, left context key and left context
- # value into the state and trim out the oldest part to keep the shape consistent.
- true_mems, lc_key, lc_val = self.state_update_before(
- layer=i,
- state=state,
- past_length=past_length,
- past_left_context=past_left_context,
- )
-
- output, mems, right_context_blocks, next_key, next_val = layer.forward_jit(
- input=output,
- lengths=lengths,
- mems=true_mems,
- right_context_blocks=right_context_blocks,
- left_context_key=lc_key,
- left_context_val=lc_val,
- rpe=rpe,
- )
- # mems is used for next layer
- mems_list, lc_key_list, lc_val_list, _ = self.state_update_after(
- layer=i,
- state=state,
- mems_list=mems_list,
- mems=mems,
- next_key=next_key,
- next_val=next_val,
- lc_key_list=lc_key_list,
- lc_val_list=lc_val_list,
- )
- i += 1
-
- # update state
- state = self.state_update_after_loop(
- state=state,
- mems_list=mems_list,
- lc_key_list=lc_key_list,
- lc_val_list=lc_val_list,
- update_length=1,
- )
-
- return output, lengths, state
-
- def quantize_(self, params=None):
- if params and "per_channel" in params and params["per_channel"]:
- qconfig = per_channel_dynamic_qconfig
- else:
- qconfig = default_dynamic_qconfig
- torch.quantization.quantize_dynamic(
- self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
- )
- return self
-
-
-# ------------------------------------------------------------------------------
-# Emformer encoder for seq2seq model
-# This is a wrapper over the original emformer
-# ------------------------------------------------------------------------------
-def emformer_encoder(klass):
- class SpeechEncoder(klass):
- def __init__(self, args):
- super().__init__(args)
- stride = SpeechEncoder.conv_layer_stride(args)
- trf_left_context = args.segment_left_context // stride
- trf_right_context = args.segment_right_context // stride
- context_config = [trf_left_context, trf_right_context]
- self.transformer_layers = nn.ModuleList(
- [
- NoSegAugmentedMemoryTransformerEncoderLayer(
- input_dim=args.encoder_embed_dim,
- num_heads=args.encoder_attention_heads,
- ffn_dim=args.encoder_ffn_embed_dim,
- num_layers=args.encoder_layers,
- dropout_in_attn=args.dropout,
- dropout_on_attn=args.dropout,
- dropout_on_fc1=args.dropout,
- dropout_on_fc2=args.dropout,
- activation_fn=args.activation_fn,
- context_config=context_config,
- segment_size=args.segment_length,
- max_memory_size=args.max_memory_size,
- scaled_init=True, # TODO: use constant for now.
- tanh_on_mem=args.amtrf_tanh_on_mem,
- )
- ]
- )
-
- def forward(self, src_tokens, src_lengths):
- encoder_out = super().forward(src_tokens, src_lengths)
- output = encoder_out["encoder_out"][0]
- encoder_padding_masks = encoder_out["encoder_padding_mask"][0]
-
- # This is because in the original implementation
- # the output did not treat the last segment as right context.
- encoder_padding_masks = encoder_padding_masks[:, : output.size(0)]
-
- return {
- "encoder_out": [output],
- "encoder_padding_mask": [encoder_padding_masks],
- "encoder_embedding": [],
- "encoder_states": [],
- "src_tokens": [],
- "src_lengths": [],
- }
-
- @staticmethod
- def conv_layer_stride(args):
- # TODO: make it configurable from the args
- return 4
-
- SpeechEncoder.__name__ = klass.__name__
- return SpeechEncoder
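
For readers skimming the deleted Emformer encoder above, here is a minimal sketch of the streaming-state layout that `init_state`, `reorder_state` and `batch_state` manipulate. The dimension names follow the code above; the concrete sizes and the toy `indices` tensor are made up for illustration and are not part of the original file.

```py
import torch

# assumed toy sizes, not taken from any real config
num_layers, max_memory_size, left_context, B, D = 2, 4, 8, 3, 16

state = [
    torch.zeros(num_layers, max_memory_size, B, D),   # memory
    torch.zeros(num_layers, left_context, B, D),      # left_context_key
    torch.zeros(num_layers, left_context, B, D),      # left_context_val
    torch.zeros(1, B, dtype=torch.int32),             # past_length
]

# reorder_state() selects along the batch axis (dim 2 for the cached tensors,
# dim 1 for past_length), e.g. when beam search reorders hypotheses
indices = torch.tensor([2, 0, 1])
reordered = [
    torch.index_select(state[0], 2, indices),
    torch.index_select(state[1], 2, indices),
    torch.index_select(state[2], 2, indices),
    torch.index_select(state[3], 1, indices),
]
print([t.shape for t in reordered])
```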
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/coco_schedule.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/coco_schedule.py
deleted file mode 100644
index 355e66a1d213cb599a7ffe55089d854089c8ead2..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/coco_schedule.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from fvcore.common.param_scheduler import MultiStepParamScheduler
-
-from detectron2.config import LazyCall as L
-from detectron2.solver import WarmupParamScheduler
-
-
-def default_X_scheduler(num_X):
- """
- Returns the config for a default multi-step LR scheduler such as "1x", "3x",
- commonly referred to in papers, where every 1x has the total length of 1440k
- training images (~12 COCO epochs). LR is decayed twice at the end of training
- following the strategy defined in "Rethinking ImageNet Pretraining", Sec 4.
-
- Args:
- num_X: a positive real number
-
- Returns:
- DictConfig: configs that define the multiplier for LR during training
- """
- # total number of iterations assuming 16 batch size, using 1440000/16=90000
- total_steps_16bs = num_X * 90000
-
- if num_X <= 2:
- scheduler = L(MultiStepParamScheduler)(
- values=[1.0, 0.1, 0.01],
- # note that scheduler is scale-invariant. This is equivalent to
- # milestones=[6, 8, 9]
- milestones=[60000, 80000, 90000],
- )
- else:
- scheduler = L(MultiStepParamScheduler)(
- values=[1.0, 0.1, 0.01],
- milestones=[total_steps_16bs - 60000, total_steps_16bs - 20000, total_steps_16bs],
- )
- return L(WarmupParamScheduler)(
- scheduler=scheduler,
- warmup_length=1000 / total_steps_16bs,
- warmup_method="linear",
- warmup_factor=0.001,
- )
-
-
-lr_multiplier_1x = default_X_scheduler(1)
-lr_multiplier_2x = default_X_scheduler(2)
-lr_multiplier_3x = default_X_scheduler(3)
-lr_multiplier_6x = default_X_scheduler(6)
-lr_multiplier_9x = default_X_scheduler(9)
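
As a quick sanity check on the milestone arithmetic in the deleted `default_X_scheduler` above, here is a dependency-free sketch; `multistep_milestones` is a hypothetical helper written for this note, not a detectron2 API.

```py
def multistep_milestones(num_X: float):
    # every "1x" is 1440k images / batch size 16 = 90k iterations
    total_steps_16bs = int(num_X * 90000)
    if num_X <= 2:
        # fixed milestones, matching the "milestones=[6, 8, 9]" note above
        milestones = [60000, 80000, 90000]
    else:
        # longer schedules decay at fixed offsets from the end of training
        milestones = [total_steps_16bs - 60000, total_steps_16bs - 20000, total_steps_16bs]
    return total_steps_16bs, milestones

print(multistep_milestones(1))  # (90000, [60000, 80000, 90000])
print(multistep_milestones(3))  # (270000, [210000, 250000, 270000])
```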
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/custom_dataset_dataloader.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/custom_dataset_dataloader.py
deleted file mode 100644
index 4e9844c99b4acf318e2935963bbf94b878076591..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/custom_dataset_dataloader.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import copy
-import logging
-import numpy as np
-import operator
-import torch
-import torch.utils.data
-import json
-from detectron2.utils.comm import get_world_size
-
-from detectron2.data import samplers
-from torch.utils.data.sampler import BatchSampler, Sampler
-from detectron2.data.common import DatasetFromList, MapDataset
-from detectron2.data.dataset_mapper import DatasetMapper
-from detectron2.data.build import get_detection_dataset_dicts, build_batch_data_loader
-from detectron2.data.samplers import TrainingSampler, RepeatFactorTrainingSampler
-from detectron2.data.build import worker_init_reset_seed, print_instances_class_histogram
-from detectron2.data.build import filter_images_with_only_crowd_annotations
-from detectron2.data.build import filter_images_with_few_keypoints
-from detectron2.data.build import check_metadata_consistency
-from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
-from detectron2.utils import comm
-import itertools
-import math
-from collections import defaultdict
-from typing import Optional
-
-# from .custom_build_augmentation import build_custom_augmentation
-
-def build_custom_train_loader(cfg, mapper=None):
- """
- Modified from detectron2.data.build.build_detection_train_loader, but supports
- different samplers
- """
- source_aware = cfg.DATALOADER.SOURCE_AWARE
- if source_aware:
- dataset_dicts = get_detection_dataset_dicts_with_source(
- cfg.DATASETS.TRAIN,
- filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
- min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
- if cfg.MODEL.KEYPOINT_ON
- else 0,
- proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
- )
- sizes = [0 for _ in range(len(cfg.DATASETS.TRAIN))]
- for d in dataset_dicts:
- sizes[d['dataset_source']] += 1
- print('dataset sizes', sizes)
- else:
- dataset_dicts = get_detection_dataset_dicts(
- cfg.DATASETS.TRAIN,
- filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
- min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
- if cfg.MODEL.KEYPOINT_ON
- else 0,
- proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
- )
- dataset = DatasetFromList(dataset_dicts, copy=False)
-
- if mapper is None:
- assert 0
- # mapper = DatasetMapper(cfg, True)
- dataset = MapDataset(dataset, mapper)
-
- sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
- logger = logging.getLogger(__name__)
- logger.info("Using training sampler {}".format(sampler_name))
- # TODO avoid if-else?
- if sampler_name == "TrainingSampler":
- sampler = TrainingSampler(len(dataset))
- elif sampler_name == "MultiDatasetSampler":
- assert source_aware
- sampler = MultiDatasetSampler(cfg, sizes, dataset_dicts)
- elif sampler_name == "RepeatFactorTrainingSampler":
- repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
- dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
- )
- sampler = RepeatFactorTrainingSampler(repeat_factors)
- elif sampler_name == "ClassAwareSampler":
- sampler = ClassAwareSampler(dataset_dicts)
- else:
- raise ValueError("Unknown training sampler: {}".format(sampler_name))
-
- return build_batch_data_loader(
- dataset,
- sampler,
- cfg.SOLVER.IMS_PER_BATCH,
- aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
- num_workers=cfg.DATALOADER.NUM_WORKERS,
- )
-
-
-class ClassAwareSampler(Sampler):
- def __init__(self, dataset_dicts, seed: Optional[int] = None):
- """
- Args:
- dataset_dicts (list[dict]): dataset dicts with "annotations"; the per-image
- class-balance weights are derived from them and their count sets the sample size
- seed (int): the initial seed of the shuffle. Must be the same
- across all workers. If None, will use a random seed shared
- among workers (requires synchronization among all workers).
- """
- self._size = len(dataset_dicts)
- assert self._size > 0
- if seed is None:
- seed = comm.shared_random_seed()
- self._seed = int(seed)
-
- self._rank = comm.get_rank()
- self._world_size = comm.get_world_size()
- self.weights = self._get_class_balance_factor(dataset_dicts)
-
-
- def __iter__(self):
- start = self._rank
- yield from itertools.islice(
- self._infinite_indices(), start, None, self._world_size)
-
-
- def _infinite_indices(self):
- g = torch.Generator()
- g.manual_seed(self._seed)
- while True:
- ids = torch.multinomial(
- self.weights, self._size, generator=g,
- replacement=True)
- yield from ids
-
-
- def _get_class_balance_factor(self, dataset_dicts, l=1.):
- # 1. For each category c, compute the number of images that contain it: f(c)
- ret = []
- category_freq = defaultdict(int)
- for dataset_dict in dataset_dicts: # For each image (without repeats)
- cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
- for cat_id in cat_ids:
- category_freq[cat_id] += 1
- for i, dataset_dict in enumerate(dataset_dicts):
- cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
- ret.append(sum(
- [1. / (category_freq[cat_id] ** l) for cat_id in cat_ids]))
- return torch.tensor(ret).float()
-
-
-def get_detection_dataset_dicts_with_source(
- dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None
-):
- assert len(dataset_names)
- dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
- for dataset_name, dicts in zip(dataset_names, dataset_dicts):
- assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
-
- for source_id, (dataset_name, dicts) in \
- enumerate(zip(dataset_names, dataset_dicts)):
- assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
- for d in dicts:
- d['dataset_source'] = source_id
-
- if "annotations" in dicts[0]:
- try:
- class_names = MetadataCatalog.get(dataset_name).thing_classes
- check_metadata_consistency("thing_classes", dataset_name)
- print_instances_class_histogram(dicts, class_names)
- except AttributeError: # class names are not available for this dataset
- pass
-
- assert proposal_files is None
-
- dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
-
- has_instances = "annotations" in dataset_dicts[0]
- if filter_empty and has_instances:
- dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
- if min_keypoints > 0 and has_instances:
- dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
-
- return dataset_dicts
-
-class MultiDatasetSampler(Sampler):
- def __init__(self, cfg, sizes, dataset_dicts, seed: Optional[int] = None):
- """
- Args:
- cfg: config providing DATALOADER.DATASET_RATIO and SOLVER.IMS_PER_BATCH
- sizes (list[int]): number of images in each source dataset
- dataset_dicts (list[dict]): dataset dicts carrying a 'dataset_source' id per image
- seed (int): the initial seed of the shuffle. Must be the same
- across all workers. If None, will use a random seed shared
- among workers (requires synchronization among all workers).
- """
- self.sizes = sizes
- dataset_ratio = cfg.DATALOADER.DATASET_RATIO
- self._batch_size = cfg.SOLVER.IMS_PER_BATCH
- assert len(dataset_ratio) == len(sizes), \
- 'length of dataset ratio {} should be equal to the number of datasets {}'.format(
- len(dataset_ratio), len(sizes)
- )
- if seed is None:
- seed = comm.shared_random_seed()
- self._seed = int(seed)
- self._rank = comm.get_rank()
- self._world_size = comm.get_world_size()
-
- self._ims_per_gpu = self._batch_size // self._world_size
- self.dataset_ids = torch.tensor(
- [d['dataset_source'] for d in dataset_dicts], dtype=torch.long)
-
- dataset_weight = [torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio) \
- for i, (r, s) in enumerate(zip(dataset_ratio, sizes))]
- dataset_weight = torch.cat(dataset_weight)
- self.weights = dataset_weight
- self.sample_epoch_size = len(self.weights)
-
- def __iter__(self):
- start = self._rank
- yield from itertools.islice(
- self._infinite_indices(), start, None, self._world_size)
-
-
- def _infinite_indices(self):
- g = torch.Generator()
- g.manual_seed(self._seed)
- while True:
- ids = torch.multinomial(
- self.weights, self.sample_epoch_size, generator=g,
- replacement=True)
- nums = [(self.dataset_ids[ids] == i).sum().int().item() \
- for i in range(len(self.sizes))]
- print('_rank, len, nums', self._rank, len(ids), nums, flush=True)
- # print('_rank, len, nums, self.dataset_ids[ids[:10]], ',
- # self._rank, len(ids), nums, self.dataset_ids[ids[:10]],
- # flush=True)
- yield from ids
\ No newline at end of file
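
To make the class-balance weighting in the deleted `ClassAwareSampler` above concrete, here is a hedged, self-contained sketch: each image is weighted by the sum of 1 / f(c) over the categories c it contains, where f(c) is the number of images containing c. The three toy dataset dicts are invented for illustration.

```py
from collections import defaultdict

import torch

# toy annotations in the detectron2 dataset-dict format (invented for this example)
dataset_dicts = [
    {"annotations": [{"category_id": 0}, {"category_id": 1}]},
    {"annotations": [{"category_id": 0}]},
    {"annotations": [{"category_id": 2}]},
]

# f(c): number of images containing category c
category_freq = defaultdict(int)
for d in dataset_dicts:
    for cat_id in {a["category_id"] for a in d["annotations"]}:
        category_freq[cat_id] += 1

# per-image weight: sum of 1 / f(c) over the categories in that image
weights = torch.tensor([
    sum(1.0 / category_freq[c] for c in {a["category_id"] for a in d["annotations"]})
    for d in dataset_dicts
]).float()

# images holding rare categories (here 1 and 2) are sampled more often
ids = torch.multinomial(weights, num_samples=6, replacement=True)
print(weights, ids)
```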
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py
deleted file mode 100644
index aac56c07da2be4e181e3e95de8cee1fc2858286d..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import copy
-import numpy as np
-import os
-import unittest
-import pycocotools.mask as mask_util
-
-from detectron2.data import MetadataCatalog, detection_utils
-from detectron2.data import transforms as T
-from detectron2.structures import BitMasks, BoxMode
-from detectron2.utils.file_io import PathManager
-
-
-class TestTransformAnnotations(unittest.TestCase):
- def test_transform_simple_annotation(self):
- transforms = T.TransformList([T.HFlipTransform(400)])
- anno = {
- "bbox": np.asarray([10, 10, 200, 300]),
- "bbox_mode": BoxMode.XYXY_ABS,
- "category_id": 3,
- "segmentation": [[10, 10, 100, 100, 100, 10], [150, 150, 200, 150, 200, 200]],
- }
-
- output = detection_utils.transform_instance_annotations(anno, transforms, (400, 400))
- self.assertTrue(np.allclose(output["bbox"], [200, 10, 390, 300]))
- self.assertEqual(len(output["segmentation"]), len(anno["segmentation"]))
- self.assertTrue(np.allclose(output["segmentation"][0], [390, 10, 300, 100, 300, 10]))
-
- detection_utils.annotations_to_instances([output, output], (400, 400))
-
- def test_transform_empty_annotation(self):
- detection_utils.annotations_to_instances([], (400, 400))
-
- def test_flip_keypoints(self):
- transforms = T.TransformList([T.HFlipTransform(400)])
- anno = {
- "bbox": np.asarray([10, 10, 200, 300]),
- "bbox_mode": BoxMode.XYXY_ABS,
- "keypoints": np.random.rand(17, 3) * 50 + 15,
- }
-
- output = detection_utils.transform_instance_annotations(
- copy.deepcopy(anno),
- transforms,
- (400, 400),
- keypoint_hflip_indices=detection_utils.create_keypoint_hflip_indices(
- ["keypoints_coco_2017_train"]
- ),
- )
- # The first keypoint is nose
- self.assertTrue(np.allclose(output["keypoints"][0, 0], 400 - anno["keypoints"][0, 0]))
- # The last 16 keypoints are 8 left-right pairs
- self.assertTrue(
- np.allclose(
- output["keypoints"][1:, 0].reshape(-1, 2)[:, ::-1],
- 400 - anno["keypoints"][1:, 0].reshape(-1, 2),
- )
- )
- self.assertTrue(
- np.allclose(
- output["keypoints"][1:, 1:].reshape(-1, 2, 2)[:, ::-1, :],
- anno["keypoints"][1:, 1:].reshape(-1, 2, 2),
- )
- )
-
- def test_crop(self):
- transforms = T.TransformList([T.CropTransform(300, 300, 10, 10)])
- keypoints = np.random.rand(17, 3) * 50 + 15
- keypoints[:, 2] = 2
- anno = {
- "bbox": np.asarray([10, 10, 200, 400]),
- "bbox_mode": BoxMode.XYXY_ABS,
- "keypoints": keypoints,
- }
-
- output = detection_utils.transform_instance_annotations(
- copy.deepcopy(anno), transforms, (10, 10)
- )
- # box is shifted and cropped
- self.assertTrue((output["bbox"] == np.asarray([0, 0, 0, 10])).all())
- # keypoints are no longer visible
- self.assertTrue((output["keypoints"][:, 2] == 0).all())
-
- def test_transform_RLE(self):
- transforms = T.TransformList([T.HFlipTransform(400)])
- mask = np.zeros((300, 400), order="F").astype("uint8")
- mask[:, :200] = 1
-
- anno = {
- "bbox": np.asarray([10, 10, 200, 300]),
- "bbox_mode": BoxMode.XYXY_ABS,
- "segmentation": mask_util.encode(mask[:, :, None])[0],
- "category_id": 3,
- }
- output = detection_utils.transform_instance_annotations(
- copy.deepcopy(anno), transforms, (300, 400)
- )
- mask = output["segmentation"]
- self.assertTrue((mask[:, 200:] == 1).all())
- self.assertTrue((mask[:, :200] == 0).all())
-
- inst = detection_utils.annotations_to_instances(
- [output, output], (400, 400), mask_format="bitmask"
- )
- self.assertTrue(isinstance(inst.gt_masks, BitMasks))
-
- def test_transform_RLE_resize(self):
- transforms = T.TransformList(
- [T.HFlipTransform(400), T.ScaleTransform(300, 400, 400, 400, "bilinear")]
- )
- mask = np.zeros((300, 400), order="F").astype("uint8")
- mask[:, :200] = 1
-
- anno = {
- "bbox": np.asarray([10, 10, 200, 300]),
- "bbox_mode": BoxMode.XYXY_ABS,
- "segmentation": mask_util.encode(mask[:, :, None])[0],
- "category_id": 3,
- }
- output = detection_utils.transform_instance_annotations(
- copy.deepcopy(anno), transforms, (400, 400)
- )
-
- inst = detection_utils.annotations_to_instances(
- [output, output], (400, 400), mask_format="bitmask"
- )
- self.assertTrue(isinstance(inst.gt_masks, BitMasks))
-
- def test_gen_crop(self):
- instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
- t = detection_utils.gen_crop_transform_with_instance((10, 10), (150, 150), instance)
- # the box center must fall into the cropped region
- self.assertTrue(t.x0 <= 55 <= t.x0 + t.w)
-
- def test_gen_crop_outside_boxes(self):
- instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
- with self.assertRaises(AssertionError):
- detection_utils.gen_crop_transform_with_instance((10, 10), (15, 15), instance)
-
- def test_read_sem_seg(self):
- cityscapes_dir = MetadataCatalog.get("cityscapes_fine_sem_seg_val").gt_dir
- sem_seg_gt_path = os.path.join(
- cityscapes_dir, "frankfurt", "frankfurt_000001_083852_gtFine_labelIds.png"
- )
- if not PathManager.exists(sem_seg_gt_path):
- raise unittest.SkipTest(
- "Semantic segmentation ground truth {} not found.".format(sem_seg_gt_path)
- )
- sem_seg = detection_utils.read_image(sem_seg_gt_path, "L")
- self.assertEqual(sem_seg.ndim, 3)
- self.assertEqual(sem_seg.shape[2], 1)
- self.assertEqual(sem_seg.dtype, np.uint8)
- self.assertEqual(sem_seg.max(), 32)
- self.assertEqual(sem_seg.min(), 1)
-
- def test_read_exif_orientation(self):
- # https://github.com/recurser/exif-orientation-examples/raw/master/Landscape_5.jpg
- URL = "detectron2://assets/Landscape_5.jpg"
- img = detection_utils.read_image(URL, "RGB")
- self.assertEqual(img.ndim, 3)
- self.assertEqual(img.dtype, np.uint8)
- self.assertEqual(img.shape, (1200, 1800, 3)) # check that shape is not transposed
-
- def test_opencv_exif_orientation(self):
- import cv2
-
- URL = "detectron2://assets/Landscape_5.jpg"
- with PathManager.open(URL, "rb") as f:
- img = cv2.imdecode(np.frombuffer(f.read(), dtype="uint8"), cv2.IMREAD_COLOR)
- self.assertEqual(img.dtype, np.uint8)
- self.assertEqual(img.shape, (1200, 1800, 3))
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/spaces/OpenGenAI/parti-prompts-leaderboard/README.md b/spaces/OpenGenAI/parti-prompts-leaderboard/README.md
deleted file mode 100644
index 4ff39552c089275b8ac0cc27fa55644907a597e9..0000000000000000000000000000000000000000
--- a/spaces/OpenGenAI/parti-prompts-leaderboard/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Parti Prompts Leaderboard
-emoji: 📊
-colorFrom: blue
-colorTo: purple
-sdk: gradio
-sdk_version: 3.41.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/PKUWilliamYang/StyleGANEX/models/bisenet/model.py b/spaces/PKUWilliamYang/StyleGANEX/models/bisenet/model.py
deleted file mode 100644
index 1d2a16ca7533c7b92c600c4dddb89f5f68191d4f..0000000000000000000000000000000000000000
--- a/spaces/PKUWilliamYang/StyleGANEX/models/bisenet/model.py
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/python
-# -*- encoding: utf-8 -*-
-
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torchvision
-
-from models.bisenet.resnet import Resnet18
-# from modules.bn import InPlaceABNSync as BatchNorm2d
-
-
-class ConvBNReLU(nn.Module):
- def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs):
- super(ConvBNReLU, self).__init__()
- self.conv = nn.Conv2d(in_chan,
- out_chan,
- kernel_size = ks,
- stride = stride,
- padding = padding,
- bias = False)
- self.bn = nn.BatchNorm2d(out_chan)
- self.init_weight()
-
- def forward(self, x):
- x = self.conv(x)
- x = F.relu(self.bn(x))
- return x
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
-class BiSeNetOutput(nn.Module):
- def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):
- super(BiSeNetOutput, self).__init__()
- self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
- self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False)
- self.init_weight()
-
- def forward(self, x):
- x = self.conv(x)
- x = self.conv_out(x)
- return x
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
- def get_params(self):
- wd_params, nowd_params = [], []
- for name, module in self.named_modules():
- if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
- wd_params.append(module.weight)
- if not module.bias is None:
- nowd_params.append(module.bias)
- elif isinstance(module, nn.BatchNorm2d):
- nowd_params += list(module.parameters())
- return wd_params, nowd_params
-
-
-class AttentionRefinementModule(nn.Module):
- def __init__(self, in_chan, out_chan, *args, **kwargs):
- super(AttentionRefinementModule, self).__init__()
- self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
- self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False)
- self.bn_atten = nn.BatchNorm2d(out_chan)
- self.sigmoid_atten = nn.Sigmoid()
- self.init_weight()
-
- def forward(self, x):
- feat = self.conv(x)
- atten = F.avg_pool2d(feat, feat.size()[2:])
- atten = self.conv_atten(atten)
- atten = self.bn_atten(atten)
- atten = self.sigmoid_atten(atten)
- out = torch.mul(feat, atten)
- return out
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
-
-class ContextPath(nn.Module):
- def __init__(self, *args, **kwargs):
- super(ContextPath, self).__init__()
- self.resnet = Resnet18()
- self.arm16 = AttentionRefinementModule(256, 128)
- self.arm32 = AttentionRefinementModule(512, 128)
- self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
- self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
- self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)
-
- self.init_weight()
-
- def forward(self, x):
- H0, W0 = x.size()[2:]
- feat8, feat16, feat32 = self.resnet(x)
- H8, W8 = feat8.size()[2:]
- H16, W16 = feat16.size()[2:]
- H32, W32 = feat32.size()[2:]
-
- avg = F.avg_pool2d(feat32, feat32.size()[2:])
- avg = self.conv_avg(avg)
- avg_up = F.interpolate(avg, (H32, W32), mode='nearest')
-
- feat32_arm = self.arm32(feat32)
- feat32_sum = feat32_arm + avg_up
- feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest')
- feat32_up = self.conv_head32(feat32_up)
-
- feat16_arm = self.arm16(feat16)
- feat16_sum = feat16_arm + feat32_up
- feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest')
- feat16_up = self.conv_head16(feat16_up)
-
- return feat8, feat16_up, feat32_up # x8, x8, x16
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
- def get_params(self):
- wd_params, nowd_params = [], []
- for name, module in self.named_modules():
- if isinstance(module, (nn.Linear, nn.Conv2d)):
- wd_params.append(module.weight)
- if not module.bias is None:
- nowd_params.append(module.bias)
- elif isinstance(module, nn.BatchNorm2d):
- nowd_params += list(module.parameters())
- return wd_params, nowd_params
-
-
-### This is not used, since we replace it with the resnet feature of the same size
-class SpatialPath(nn.Module):
- def __init__(self, *args, **kwargs):
- super(SpatialPath, self).__init__()
- self.conv1 = ConvBNReLU(3, 64, ks=7, stride=2, padding=3)
- self.conv2 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1)
- self.conv3 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1)
- self.conv_out = ConvBNReLU(64, 128, ks=1, stride=1, padding=0)
- self.init_weight()
-
- def forward(self, x):
- feat = self.conv1(x)
- feat = self.conv2(feat)
- feat = self.conv3(feat)
- feat = self.conv_out(feat)
- return feat
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
- def get_params(self):
- wd_params, nowd_params = [], []
- for name, module in self.named_modules():
- if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
- wd_params.append(module.weight)
- if not module.bias is None:
- nowd_params.append(module.bias)
- elif isinstance(module, nn.BatchNorm2d):
- nowd_params += list(module.parameters())
- return wd_params, nowd_params
-
-
-class FeatureFusionModule(nn.Module):
- def __init__(self, in_chan, out_chan, *args, **kwargs):
- super(FeatureFusionModule, self).__init__()
- self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
- self.conv1 = nn.Conv2d(out_chan,
- out_chan//4,
- kernel_size = 1,
- stride = 1,
- padding = 0,
- bias = False)
- self.conv2 = nn.Conv2d(out_chan//4,
- out_chan,
- kernel_size = 1,
- stride = 1,
- padding = 0,
- bias = False)
- self.relu = nn.ReLU(inplace=True)
- self.sigmoid = nn.Sigmoid()
- self.init_weight()
-
- def forward(self, fsp, fcp):
- fcat = torch.cat([fsp, fcp], dim=1)
- feat = self.convblk(fcat)
- atten = F.avg_pool2d(feat, feat.size()[2:])
- atten = self.conv1(atten)
- atten = self.relu(atten)
- atten = self.conv2(atten)
- atten = self.sigmoid(atten)
- feat_atten = torch.mul(feat, atten)
- feat_out = feat_atten + feat
- return feat_out
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
- def get_params(self):
- wd_params, nowd_params = [], []
- for name, module in self.named_modules():
- if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
- wd_params.append(module.weight)
- if not module.bias is None:
- nowd_params.append(module.bias)
- elif isinstance(module, nn.BatchNorm2d):
- nowd_params += list(module.parameters())
- return wd_params, nowd_params
-
-
-class BiSeNet(nn.Module):
- def __init__(self, n_classes, *args, **kwargs):
- super(BiSeNet, self).__init__()
- self.cp = ContextPath()
- ## here self.sp is deleted
- self.ffm = FeatureFusionModule(256, 256)
- self.conv_out = BiSeNetOutput(256, 256, n_classes)
- self.conv_out16 = BiSeNetOutput(128, 64, n_classes)
- self.conv_out32 = BiSeNetOutput(128, 64, n_classes)
- self.init_weight()
-
- def forward(self, x):
- H, W = x.size()[2:]
- feat_res8, feat_cp8, feat_cp16 = self.cp(x) # here return res3b1 feature
- feat_sp = feat_res8 # use res3b1 feature to replace spatial path feature
- feat_fuse = self.ffm(feat_sp, feat_cp8)
-
- feat_out = self.conv_out(feat_fuse)
- feat_out16 = self.conv_out16(feat_cp8)
- feat_out32 = self.conv_out32(feat_cp16)
-
- feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True)
- feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True)
- feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True)
- return feat_out, feat_out16, feat_out32
-
- def init_weight(self):
- for ly in self.children():
- if isinstance(ly, nn.Conv2d):
- nn.init.kaiming_normal_(ly.weight, a=1)
- if not ly.bias is None: nn.init.constant_(ly.bias, 0)
-
- def get_params(self):
- wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], []
- for name, child in self.named_children():
- child_wd_params, child_nowd_params = child.get_params()
- if isinstance(child, FeatureFusionModule) or isinstance(child, BiSeNetOutput):
- lr_mul_wd_params += child_wd_params
- lr_mul_nowd_params += child_nowd_params
- else:
- wd_params += child_wd_params
- nowd_params += child_nowd_params
- return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params
-
-
-if __name__ == "__main__":
- net = BiSeNet(19)
- net.cuda()
- net.eval()
- in_ten = torch.randn(16, 3, 640, 480).cuda()
- out, out16, out32 = net(in_ten)
- print(out.shape)
-
- net.get_params()
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-4/gnu.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-4/gnu.go
deleted file mode 100644
index 756c1a3c9caaa353e9b20c8dba10a7ff82e3563a..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-4/gnu.go and /dev/null differ
diff --git a/spaces/Pengyey/bingo-chuchu/src/components/welcome-screen.tsx b/spaces/Pengyey/bingo-chuchu/src/components/welcome-screen.tsx
deleted file mode 100644
index f7449fcbb6c621875e235db98f2790bf7894fb0a..0000000000000000000000000000000000000000
--- a/spaces/Pengyey/bingo-chuchu/src/components/welcome-screen.tsx
+++ /dev/null
@@ -1,34 +0,0 @@
-import { useBing } from '@/lib/hooks/use-bing'
-
-const exampleMessages = [
- {
- heading: '🧐 提出复杂问题',
- message: `我可以为我挑剔的只吃橙色食物的孩子做什么饭?`
- },
- {
- heading: '🙌 获取更好的答案',
- message: '销量最高的 3 种宠物吸尘器有哪些优点和缺点?'
- },
- {
- heading: '🎨 获得创意灵感',
- message: `以海盗的口吻写一首关于外太空鳄鱼的俳句`
- }
-]
-
-export function WelcomeScreen({ setInput }: Pick<ReturnType<typeof useBing>, 'setInput'>) {
- return (
- <div>
- {exampleMessages.map(example => (
- <button key={example.heading} onClick={() => setInput(example.message)}>
- {example.heading}
- {example.message}
- </button>
- ))}
- </div>
- )
-}
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/evaluation/od_to_grounding/od_eval.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/evaluation/od_to_grounding/od_eval.py
deleted file mode 100644
index c67686735a0e3d4586af8954f1e8155f5512dc3e..0000000000000000000000000000000000000000
--- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/evaluation/od_to_grounding/od_eval.py
+++ /dev/null
@@ -1,532 +0,0 @@
-import logging
-import tempfile
-import os
-import torch
-import numpy as np
-import json
-
-from collections import OrderedDict
-from tqdm import tqdm
-
-from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
-from maskrcnn_benchmark.structures.bounding_box import BoxList
-from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
-
-
-def do_od_evaluation(
- dataset,
- predictions,
- box_only,
- output_folder,
- iou_types,
- expected_results,
- expected_results_sigma_tol,
-):
- logger = logging.getLogger("maskrcnn_benchmark.inference")
-
- if box_only:
- logger.info("Evaluating bbox proposals")
- if dataset.coco is None and output_folder:
- json_results = prepare_for_tsv_detection(predictions, dataset)
- with open(os.path.join(output_folder, "box_proposals.json"), "w") as f:
- json.dump(json_results, f)
- return None
- areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
- res = COCOResults("box_proposal")
- for limit in [100, 1000]:
- for area, suffix in areas.items():
- stats = evaluate_box_proposals(
- predictions, dataset, area=area, limit=limit
- )
- key = "AR{}@{:d}".format(suffix, limit)
- res.results["box_proposal"][key] = stats["ar"].item()
- logger.info(res)
- check_expected_results(res, expected_results, expected_results_sigma_tol)
- if output_folder:
- torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
- return res, predictions
- logger.info("Preparing results for COCO format")
- coco_results = {}
- if "bbox" in iou_types:
- logger.info("Preparing bbox results")
- if dataset.coco is None:
- coco_results["bbox"] = prepare_for_tsv_detection(predictions, dataset)
- else:
- coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset)
- if "segm" in iou_types:
- logger.info("Preparing segm results")
- coco_results["segm"] = prepare_for_coco_segmentation(predictions, dataset)
- if 'keypoints' in iou_types:
- logger.info('Preparing keypoints results')
- coco_results['keypoints'] = prepare_for_coco_keypoint(predictions, dataset)
-
- results = COCOResults(*iou_types)
- logger.info("Evaluating predictions")
- for iou_type in iou_types:
- with tempfile.NamedTemporaryFile() as f:
- file_path = f.name
- if output_folder:
- file_path = os.path.join(output_folder, iou_type + ".json")
- if dataset.coco:
- res = evaluate_predictions_on_coco(
- dataset.coco, coco_results[iou_type], file_path, iou_type
- )
- results.update(res)
- elif output_folder:
- with open(file_path, "w") as f:
- json.dump(coco_results[iou_type], f)
-
- logger.info(results)
- check_expected_results(results, expected_results, expected_results_sigma_tol)
- if output_folder:
- torch.save(results, os.path.join(output_folder, "coco_results.pth"))
- return results, coco_results
-
-
-def prepare_for_tsv_detection(predictions, dataset):
- # assert isinstance(dataset, COCODataset)
- proposal_results = []
- image_list = []
- for im_id, prediction in enumerate(predictions):
- image_info = dataset.get_img_info(im_id)
- if len(prediction) == 0:
- continue
-
- # TODO replace with get_img_info?
- image_id = image_info["id"]
- image_width = image_info["width"]
- image_height = image_info["height"]
- prediction = prediction.resize((image_width, image_height))
- prediction = prediction.convert("xywh")
-
- boxes = prediction.bbox.tolist()
- scores = prediction.get_field("scores").tolist()
- labels = prediction.get_field("labels").tolist()
- if prediction.has_field("centers"):
- centers = prediction.get_field("centers")
- else:
- centers = None
-
- for k, box in enumerate(boxes):
- proposal = {
- "image_id": image_id,
- "category_id": labels[k],
- "bbox": box,
- "score": scores[k],
- "area": image_width * image_height,
- "iscrowd": 0,
- }
- if centers is not None:
- proposal.update(center=centers[k].tolist())
- proposal_results.append(proposal)
-
- image_list.append(image_info)
-
- # categories = [{'supercategory': 'proposal', 'id': 0, 'name': 'proposal'}]
- return dict(images=image_list, annotations=proposal_results)
-
-
-def prepare_for_coco_detection(predictions, dataset):
- # assert isinstance(dataset, COCODataset)
- coco_results = []
- for image_id, prediction in enumerate(predictions):
- original_id = dataset.id_to_img_map[image_id]
- if len(prediction) == 0:
- continue
-
- # TODO replace with get_img_info?
- image_width = dataset.coco.imgs[original_id]["width"]
- image_height = dataset.coco.imgs[original_id]["height"]
- prediction = prediction.resize((image_width, image_height))
- prediction = prediction.convert("xywh")
-
- boxes = prediction.bbox.tolist()
- scores = prediction.get_field("scores").tolist()
- labels = prediction.get_field("labels").tolist()
-
- for k, box in enumerate(boxes):
- if labels[k] in dataset.contiguous_category_id_to_json_id:
- coco_results.append(
- {
- "image_id": original_id,
- "category_id": dataset.contiguous_category_id_to_json_id[labels[k]],
- "bbox": box,
- "score": scores[k],
- })
-
- return coco_results
-
-
-def prepare_for_coco_segmentation(predictions, dataset):
- import pycocotools.mask as mask_util
- import numpy as np
-
- masker = Masker(threshold=0.5, padding=1)
- # assert isinstance(dataset, COCODataset)
- coco_results = []
- for image_id, prediction in tqdm(enumerate(predictions)):
- original_id = dataset.id_to_img_map[image_id]
- if len(prediction) == 0:
- continue
-
- # TODO replace with get_img_info?
- image_width = dataset.coco.imgs[original_id]["width"]
- image_height = dataset.coco.imgs[original_id]["height"]
- prediction = prediction.resize((image_width, image_height))
- masks = prediction.get_field("mask")
- # t = time.time()
- # Masker is necessary only if masks haven't been already resized.
- if list(masks.shape[-2:]) != [image_height, image_width]:
- masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)
- masks = masks[0]
- # logger.info('Time mask: {}'.format(time.time() - t))
- # prediction = prediction.convert('xywh')
-
- # boxes = prediction.bbox.tolist()
- scores = prediction.get_field("scores").tolist()
- labels = prediction.get_field("labels").tolist()
-
- # rles = prediction.get_field('mask')
-
- rles = [
- mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
- for mask in masks
- ]
- for rle in rles:
- rle["counts"] = rle["counts"].decode("utf-8")
-
- mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
-
- coco_results.extend(
- [
- {
- "image_id": original_id,
- "category_id": mapped_labels[k],
- "segmentation": rle,
- "score": scores[k],
- }
- for k, rle in enumerate(rles)
- ]
- )
- return coco_results
-
-
-def prepare_for_coco_keypoint(predictions, dataset):
- # assert isinstance(dataset, COCODataset)
- coco_results = []
- for image_id, prediction in enumerate(predictions):
- original_id = dataset.id_to_img_map[image_id]
- if len(prediction.bbox) == 0:
- continue
-
- # TODO replace with get_img_info?
- image_width = dataset.coco.imgs[original_id]['width']
- image_height = dataset.coco.imgs[original_id]['height']
- prediction = prediction.resize((image_width, image_height))
- prediction = prediction.convert('xywh')
-
- boxes = prediction.bbox.tolist()
- scores = prediction.get_field('scores').tolist()
- labels = prediction.get_field('labels').tolist()
- keypoints = prediction.get_field('keypoints')
- keypoints = keypoints.resize((image_width, image_height))
- keypoints = keypoints.to_coco_format()
-
- mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
-
- coco_results.extend([{
- 'image_id': original_id,
- 'category_id': mapped_labels[k],
- 'keypoints': keypoint,
- 'score': scores[k]} for k, keypoint in enumerate(keypoints)])
- return coco_results
-
-
-# inspired from Detectron
-def evaluate_box_proposals(
- predictions, dataset, thresholds=None, area="all", limit=None
-):
- """Evaluate detection proposal recall metrics. This function is a much
- faster alternative to the official COCO API recall evaluation code. However,
- it produces slightly different results.
- """
- # Record max overlap value for each gt box
- # Return vector of overlap values
- areas = {
- "all": 0,
- "small": 1,
- "medium": 2,
- "large": 3,
- "96-128": 4,
- "128-256": 5,
- "256-512": 6,
- "512-inf": 7,
- }
- area_ranges = [
- [0 ** 2, 1e5 ** 2], # all
- [0 ** 2, 32 ** 2], # small
- [32 ** 2, 96 ** 2], # medium
- [96 ** 2, 1e5 ** 2], # large
- [96 ** 2, 128 ** 2], # 96-128
- [128 ** 2, 256 ** 2], # 128-256
- [256 ** 2, 512 ** 2], # 256-512
- [512 ** 2, 1e5 ** 2],
- ] # 512-inf
- assert area in areas, "Unknown area range: {}".format(area)
- area_range = area_ranges[areas[area]]
- gt_overlaps = []
- num_pos = 0
-
- for image_id, prediction in enumerate(predictions):
- original_id = dataset.id_to_img_map[image_id]
-
- # TODO replace with get_img_info?
- image_width = dataset.coco.imgs[original_id]["width"]
- image_height = dataset.coco.imgs[original_id]["height"]
- prediction = prediction.resize((image_width, image_height))
-
- # sort predictions in descending order
- # TODO maybe remove this and make it explicit in the documentation
- if prediction.has_field("objectness"):
- inds = prediction.get_field("objectness").sort(descending=True)[1]
- else:
- inds = prediction.get_field("scores").sort(descending=True)[1]
- prediction = prediction[inds]
-
- ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
- anno = dataset.coco.loadAnns(ann_ids)
- gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
- gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
- gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
- "xyxy"
- )
- gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
-
- if len(gt_boxes) == 0:
- continue
-
- valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
- gt_boxes = gt_boxes[valid_gt_inds]
-
- num_pos += len(gt_boxes)
-
- if len(gt_boxes) == 0:
- continue
-
- if len(prediction) == 0:
- continue
-
- if limit is not None and len(prediction) > limit:
- prediction = prediction[:limit]
-
- overlaps = boxlist_iou(prediction, gt_boxes)
-
- _gt_overlaps = torch.zeros(len(gt_boxes))
- for j in range(min(len(prediction), len(gt_boxes))):
- # find which proposal box maximally covers each gt box
- # and get the iou amount of coverage for each gt box
- max_overlaps, argmax_overlaps = overlaps.max(dim=0)
-
- # find which gt box is 'best' covered (i.e. 'best' = most iou)
- gt_ovr, gt_ind = max_overlaps.max(dim=0)
- assert gt_ovr >= 0
- # find the proposal box that covers the best covered gt box
- box_ind = argmax_overlaps[gt_ind]
- # record the iou coverage of this gt box
- _gt_overlaps[j] = overlaps[box_ind, gt_ind]
- assert _gt_overlaps[j] == gt_ovr
- # mark the proposal box and the gt box as used
- overlaps[box_ind, :] = -1
- overlaps[:, gt_ind] = -1
-
- # append recorded iou coverage level
- gt_overlaps.append(_gt_overlaps)
-
- if len(gt_overlaps) == 0:
- return {
- "ar": torch.zeros(1),
- "recalls": torch.zeros(1),
- "thresholds": thresholds,
- "gt_overlaps": gt_overlaps,
- "num_pos": num_pos,
- }
-
- gt_overlaps = torch.cat(gt_overlaps, dim=0)
- gt_overlaps, _ = torch.sort(gt_overlaps)
-
- if thresholds is None:
- step = 0.05
- thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
- recalls = torch.zeros_like(thresholds)
- # compute recall for each iou threshold
- for i, t in enumerate(thresholds):
- recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
- # ar = 2 * np.trapz(recalls, thresholds)
- ar = recalls.mean()
- return {
- "ar": ar,
- "recalls": recalls,
- "thresholds": thresholds,
- "gt_overlaps": gt_overlaps,
- "num_pos": num_pos,
- }
-
-
-def evaluate_predictions_on_coco(
- coco_gt, coco_results, json_result_file, iou_type="bbox"
-):
- import json
-
- with open(json_result_file, "w") as f:
- json.dump(coco_results, f)
-
- from pycocotools.coco import COCO
- from pycocotools.cocoeval import COCOeval
-
- coco_dt = coco_gt.loadRes(str(json_result_file)) if coco_results else COCO()
-
- # coco_dt = coco_gt.loadRes(coco_results)
- if iou_type == 'keypoints':
- coco_gt = filter_valid_keypoints(coco_gt, coco_dt)
- coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
- coco_eval.evaluate()
- coco_eval.accumulate()
- coco_eval.summarize()
- if iou_type == 'bbox':
- summarize_per_category(coco_eval, json_result_file.replace('.json', '.csv'))
- return coco_eval
-
-
-def summarize_per_category(coco_eval, csv_output=None):
- '''
- Compute and display summary metrics for evaluation results.
- Note this function can *only* be applied with the default parameter setting.
- '''
-
- def _summarize(iouThr=None, areaRng='all', maxDets=100):
- p = coco_eval.params
- titleStr = 'Average Precision'
- typeStr = '(AP)'
- iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
- if iouThr is None else '{:0.2f}'.format(iouThr)
- result_str = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ], '. \
- format(titleStr, typeStr, iouStr, areaRng, maxDets)
-
- aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
- mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
-
- # dimension of precision: [TxRxKxAxM]
- s = coco_eval.eval['precision']
- # IoU
- if iouThr is not None:
- t = np.where(iouThr == p.iouThrs)[0]
- s = s[t]
- s = s[:, :, :, aind, mind]
-
- if len(s[s > -1]) == 0:
- mean_s = -1
- else:
- mean_s = np.mean(s[s > -1])
- # calculate AP (average precision) for each category
- num_classes = len(p.catIds)
- avg_ap = 0.0
- for i in range(0, num_classes):
- result_str += '{}, '.format(np.mean(s[:, :, i, :]))
- avg_ap += np.mean(s[:, :, i, :])
- result_str += ('{} \n'.format(avg_ap / num_classes))
- return result_str
-
- id2name = {}
- for _, cat in coco_eval.cocoGt.cats.items():
- id2name[cat['id']] = cat['name']
- title_str = 'metric, '
- for cid in coco_eval.params.catIds:
- title_str += '{}, '.format(id2name[cid])
- title_str += 'avg \n'
-
- results = [title_str]
- results.append(_summarize())
- results.append(_summarize(iouThr=.5, maxDets=coco_eval.params.maxDets[2]))
- results.append(_summarize(areaRng='small', maxDets=coco_eval.params.maxDets[2]))
- results.append(_summarize(areaRng='medium', maxDets=coco_eval.params.maxDets[2]))
- results.append(_summarize(areaRng='large', maxDets=coco_eval.params.maxDets[2]))
-
- with open(csv_output, 'w') as f:
- for result in results:
- f.writelines(result)
-
-
-def filter_valid_keypoints(coco_gt, coco_dt):
- kps = coco_dt.anns[1]['keypoints']
- for id, ann in coco_gt.anns.items():
- ann['keypoints'][2::3] = [a * b for a, b in zip(ann['keypoints'][2::3], kps[2::3])]
- ann['num_keypoints'] = sum(ann['keypoints'][2::3])
- return coco_gt
-
-
-class COCOResults(object):
- METRICS = {
- "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
- "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
- "box_proposal": [
- "AR@100",
- "ARs@100",
- "ARm@100",
- "ARl@100",
- "AR@1000",
- "ARs@1000",
- "ARm@1000",
- "ARl@1000",
- ],
- "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
- }
-
- def __init__(self, *iou_types):
- allowed_types = ("box_proposal", "bbox", "segm", "keypoints")
- assert all(iou_type in allowed_types for iou_type in iou_types)
- results = OrderedDict()
- for iou_type in iou_types:
- results[iou_type] = OrderedDict(
- [(metric, -1) for metric in COCOResults.METRICS[iou_type]]
- )
- self.results = results
-
- def update(self, coco_eval):
- if coco_eval is None:
- return
- from pycocotools.cocoeval import COCOeval
-
- assert isinstance(coco_eval, COCOeval)
- s = coco_eval.stats
- iou_type = coco_eval.params.iouType
- res = self.results[iou_type]
- metrics = COCOResults.METRICS[iou_type]
- for idx, metric in enumerate(metrics):
- res[metric] = s[idx]
-
- def __repr__(self):
- # TODO make it pretty
- return repr(self.results)
-
-
-def check_expected_results(results, expected_results, sigma_tol):
- if not expected_results:
- return
-
- logger = logging.getLogger("maskrcnn_benchmark.inference")
- for task, metric, (mean, std) in expected_results:
- actual_val = results.results[task][metric]
- lo = mean - sigma_tol * std
- hi = mean + sigma_tol * std
- ok = (lo < actual_val) and (actual_val < hi)
- msg = (
- "{} > {} sanity check (actual vs. expected): "
- "{:.3f} vs. mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})"
- ).format(task, metric, actual_val, mean, std, lo, hi)
- if not ok:
- msg = "FAIL: " + msg
- logger.error(msg)
- else:
- msg = "PASS: " + msg
- logger.info(msg)
-
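The averaged-recall bookkeeping above reduces to a few tensor ops once `gt_overlaps` has been collected; a minimal sketch with hypothetical IoU values (plain PyTorch, no dataset needed):

```py
import torch

# Hypothetical best-IoU values, one per matched ground-truth box,
# i.e. the concatenated gt_overlaps built in the loop above.
gt_overlaps = torch.tensor([0.92, 0.88, 0.75, 0.60, 0.55, 0.40])
num_pos = gt_overlaps.numel()  # total number of valid ground-truth boxes

thresholds = torch.arange(0.5, 0.95 + 1e-5, 0.05, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
for i, t in enumerate(thresholds):
    # recall at IoU threshold t: fraction of GT boxes covered by some proposal
    recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)

ar = recalls.mean()  # the "AR" value reported by evaluate_box_proposals
print(recalls.tolist(), float(ar))
```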
diff --git a/spaces/Realcat/image-matching-webui/third_party/r2d2/tools/transforms_tools.py b/spaces/Realcat/image-matching-webui/third_party/r2d2/tools/transforms_tools.py
deleted file mode 100644
index 77eb1da2306116d789cdcf6b957a6c144a746a4f..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/r2d2/tools/transforms_tools.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# Copyright 2019-present NAVER Corp.
-# CC BY-NC-SA 3.0
-# Available only for non-commercial use
-
-import pdb
-import numpy as np
-from PIL import Image, ImageOps, ImageEnhance
-
-
-class DummyImg:
- """This class is a dummy image only defined by its size."""
-
- def __init__(self, size):
- self.size = size
-
- def resize(self, size, *args, **kwargs):
- return DummyImg(size)
-
- def expand(self, border):
- w, h = self.size
- if isinstance(border, int):
- size = (w + 2 * border, h + 2 * border)
- else:
- l, t, r, b = border
- size = (w + l + r, h + t + b)
- return DummyImg(size)
-
- def crop(self, border):
- w, h = self.size
- l, t, r, b = border
- assert 0 <= l <= r <= w
- assert 0 <= t <= b <= h
- size = (r - l, b - t)
- return DummyImg(size)
-
- def rotate(self, angle):
- raise NotImplementedError
-
- def transform(self, size, *args, **kwargs):
- return DummyImg(size)
-
-
-def grab_img(img_and_label):
- """Called to extract the image from an img_and_label input
- (a dictionary). Also compatible with old-style PIL images.
- """
- if isinstance(img_and_label, dict):
- # if input is a dictionary, then
- # it must contain the img or its size.
- try:
- return img_and_label["img"]
- except KeyError:
- return DummyImg(img_and_label["imsize"])
-
- else:
- # or it must be the img directly
- return img_and_label
-
-
-def update_img_and_labels(img_and_label, img, persp=None):
- """Called to update the img_and_label"""
- if isinstance(img_and_label, dict):
- img_and_label["img"] = img
- img_and_label["imsize"] = img.size
-
- if persp:
- if "persp" not in img_and_label:
- img_and_label["persp"] = (1, 0, 0, 0, 1, 0, 0, 0)
- img_and_label["persp"] = persp_mul(persp, img_and_label["persp"])
-
- return img_and_label
-
- else:
- # or it must be the img directly
- return img
-
-
-def rand_log_uniform(a, b):
- return np.exp(np.random.uniform(np.log(a), np.log(b)))
-
-
-def translate(tx, ty):
- return (1, 0, tx, 0, 1, ty, 0, 0)
-
-
-def rotate(angle):
- return (np.cos(angle), -np.sin(angle), 0, np.sin(angle), np.cos(angle), 0, 0, 0)
-
-
-def persp_mul(mat, mat2):
- """homography (perspective) multiplication.
- mat: 8-tuple (homography transform)
- mat2: 8-tuple (homography transform) or 2-tuple (point)
- """
- assert isinstance(mat, tuple)
- assert isinstance(mat2, tuple)
-
- mat = np.float32(mat + (1,)).reshape(3, 3)
- mat2 = np.array(mat2 + (1,)).reshape(3, 3)
- res = np.dot(mat, mat2)
- return tuple((res / res[2, 2]).ravel()[:8])
-
-
-def persp_apply(mat, pts):
- """homography (perspective) transformation.
- mat: 8-tuple (homography transform)
- pts: numpy array
- """
- assert isinstance(mat, tuple)
- assert isinstance(pts, np.ndarray)
- assert pts.shape[-1] == 2
- mat = np.float32(mat + (1,)).reshape(3, 3)
-
- if pts.ndim == 1:
- pt = np.dot(pts, mat[:, :2].T).ravel() + mat[:, 2]
- pt /= pt[2] # homogeneous coordinates
- return tuple(pt[:2])
- else:
- pt = np.dot(pts, mat[:, :2].T) + mat[:, 2]
- pt[:, :2] /= pt[:, 2:3] # homogeneous coordinates
- return pt[:, :2]
-
-
-def is_pil_image(img):
- return isinstance(img, Image.Image)
-
-
-def adjust_brightness(img, brightness_factor):
- """Adjust brightness of an Image.
- Args:
- img (PIL Image): PIL Image to be adjusted.
- brightness_factor (float): How much to adjust the brightness. Can be
- any non negative number. 0 gives a black image, 1 gives the
- original image while 2 increases the brightness by a factor of 2.
- Returns:
- PIL Image: Brightness adjusted image.
- Copied from https://github.com/pytorch in torchvision/transforms/functional.py
- """
- if not is_pil_image(img):
- raise TypeError("img should be PIL Image. Got {}".format(type(img)))
-
- enhancer = ImageEnhance.Brightness(img)
- img = enhancer.enhance(brightness_factor)
- return img
-
-
-def adjust_contrast(img, contrast_factor):
- """Adjust contrast of an Image.
- Args:
- img (PIL Image): PIL Image to be adjusted.
- contrast_factor (float): How much to adjust the contrast. Can be any
- non negative number. 0 gives a solid gray image, 1 gives the
- original image while 2 increases the contrast by a factor of 2.
- Returns:
- PIL Image: Contrast adjusted image.
- Copied from https://github.com/pytorch in torchvision/transforms/functional.py
- """
- if not is_pil_image(img):
- raise TypeError("img should be PIL Image. Got {}".format(type(img)))
-
- enhancer = ImageEnhance.Contrast(img)
- img = enhancer.enhance(contrast_factor)
- return img
-
-
-def adjust_saturation(img, saturation_factor):
- """Adjust color saturation of an image.
- Args:
- img (PIL Image): PIL Image to be adjusted.
- saturation_factor (float): How much to adjust the saturation. 0 will
- give a black and white image, 1 will give the original image while
- 2 will enhance the saturation by a factor of 2.
- Returns:
- PIL Image: Saturation adjusted image.
- Copied from https://github.com/pytorch in torchvision/transforms/functional.py
- """
- if not is_pil_image(img):
- raise TypeError("img should be PIL Image. Got {}".format(type(img)))
-
- enhancer = ImageEnhance.Color(img)
- img = enhancer.enhance(saturation_factor)
- return img
-
-
-def adjust_hue(img, hue_factor):
- """Adjust hue of an image.
- The image hue is adjusted by converting the image to HSV and
- cyclically shifting the intensities in the hue channel (H).
- The image is then converted back to original image mode.
- `hue_factor` is the amount of shift in H channel and must be in the
- interval `[-0.5, 0.5]`.
- See https://en.wikipedia.org/wiki/Hue for more details on Hue.
- Args:
- img (PIL Image): PIL Image to be adjusted.
- hue_factor (float): How much to shift the hue channel. Should be in
- [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
- HSV space in positive and negative direction respectively.
- 0 means no shift. Therefore, both -0.5 and 0.5 will give an image
- with complementary colors while 0 gives the original image.
- Returns:
- PIL Image: Hue adjusted image.
- Copied from https://github.com/pytorch in torchvision/transforms/functional.py
- """
- if not (-0.5 <= hue_factor <= 0.5):
- raise ValueError("hue_factor is not in [-0.5, 0.5].".format(hue_factor))
-
- if not is_pil_image(img):
- raise TypeError("img should be PIL Image. Got {}".format(type(img)))
-
- input_mode = img.mode
- if input_mode in {"L", "1", "I", "F"}:
- return img
-
- h, s, v = img.convert("HSV").split()
-
- np_h = np.array(h, dtype=np.uint8)
- # uint8 addition takes care of rotation across boundaries
- with np.errstate(over="ignore"):
- np_h += np.uint8(hue_factor * 255)
- h = Image.fromarray(np_h, "L")
-
- img = Image.merge("HSV", (h, s, v)).convert(input_mode)
- return img
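As a quick illustration of the flattened 8-tuple homographies used throughout this module, here is a hedged sketch (arbitrary angle and offsets) that composes `rotate` and `translate` with `persp_mul` and warps points with `persp_apply`:

```py
import numpy as np

# Compose: rotate by 30 degrees first, then translate by (10, 5).
H = persp_mul(translate(10, 5), rotate(np.deg2rad(30)))

pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
print(persp_apply(H, pts))                  # (3, 2) array of warped points

# A single point can be passed as a 1-D array and comes back as a tuple.
print(persp_apply(H, np.array([1.0, 1.0])))
```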
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py
deleted file mode 100644
index d7a43bee01422ad4795dd27874e0cd4bb6cbfecf..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='ASPPHead',
- in_channels=2048,
- in_index=3,
- channels=512,
- dilations=(1, 12, 24, 36),
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/upsample.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/upsample.py
deleted file mode 100644
index a1a353767d0ce8518f0d7289bed10dba0178ed12..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/upsample.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ..utils import xavier_init
-from .registry import UPSAMPLE_LAYERS
-
-UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample)
-UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample)
-
-
-@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
-class PixelShufflePack(nn.Module):
- """Pixel Shuffle upsample layer.
-
- This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to
- achieve a simple upsampling with pixel shuffle.
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- scale_factor (int): Upsample ratio.
- upsample_kernel (int): Kernel size of the conv layer to expand the
- channels.
- """
-
- def __init__(self, in_channels, out_channels, scale_factor,
- upsample_kernel):
- super(PixelShufflePack, self).__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.scale_factor = scale_factor
- self.upsample_kernel = upsample_kernel
- self.upsample_conv = nn.Conv2d(
- self.in_channels,
- self.out_channels * scale_factor * scale_factor,
- self.upsample_kernel,
- padding=(self.upsample_kernel - 1) // 2)
- self.init_weights()
-
- def init_weights(self):
- xavier_init(self.upsample_conv, distribution='uniform')
-
- def forward(self, x):
- x = self.upsample_conv(x)
- x = F.pixel_shuffle(x, self.scale_factor)
- return x
-
-
-def build_upsample_layer(cfg, *args, **kwargs):
- """Build upsample layer.
-
- Args:
- cfg (dict): The upsample layer config, which should contain:
-
- - type (str): Layer type.
- - scale_factor (int): Upsample ratio, which is not applicable to
- deconv.
- - layer args: Args needed to instantiate an upsample layer.
- args (argument list): Arguments passed to the ``__init__``
- method of the corresponding upsample layer.
- kwargs (keyword arguments): Keyword arguments passed to the
- ``__init__`` method of the corresponding upsample layer.
-
- Returns:
- nn.Module: Created upsample layer.
- """
- if not isinstance(cfg, dict):
- raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
- if 'type' not in cfg:
- raise KeyError(
- f'the cfg dict must contain the key "type", but got {cfg}')
- cfg_ = cfg.copy()
-
- layer_type = cfg_.pop('type')
- if layer_type not in UPSAMPLE_LAYERS:
- raise KeyError(f'Unrecognized upsample type {layer_type}')
- else:
- upsample = UPSAMPLE_LAYERS.get(layer_type)
-
- if upsample is nn.Upsample:
- cfg_['mode'] = layer_type
- layer = upsample(*args, **kwargs, **cfg_)
- return layer
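A small usage sketch of the two paths through `build_upsample_layer`; shapes are illustrative and assume the registry above imports cleanly:

```py
import torch

# nn.Upsample path: the registered name doubles as the interpolation mode.
up = build_upsample_layer(dict(type='nearest', scale_factor=2))
y = up(torch.randn(1, 64, 8, 8))   # -> (1, 64, 16, 16)

# PixelShufflePack path: the conv expands channels, pixel_shuffle trades
# them back for spatial resolution.
up = build_upsample_layer(
    dict(type='pixel_shuffle', in_channels=64, out_channels=64,
         scale_factor=2, upsample_kernel=3))
y = up(torch.randn(1, 64, 8, 8))   # -> (1, 64, 16, 16)
```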
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/core/utils/misc.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/core/utils/misc.py
deleted file mode 100644
index 3e22c7b9085317b61a25c67d361f7e70df65bed1..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/core/utils/misc.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from functools import partial
-
-import numpy as np
-import torch
-from six.moves import map, zip
-
-from ..mask.structures import BitmapMasks, PolygonMasks
-
-
-def multi_apply(func, *args, **kwargs):
- """Apply function to a list of arguments.
-
- Note:
- This function applies the ``func`` to multiple inputs and
- maps the multiple outputs of the ``func`` into different
- lists. Each list contains the same type of outputs corresponding
- to different inputs.
-
- Args:
- func (Function): A function that will be applied to a list of
- arguments
-
- Returns:
- tuple(list): A tuple containing multiple lists, where each list \
- contains one kind of result returned by the function.
- """
- pfunc = partial(func, **kwargs) if kwargs else func
- map_results = map(pfunc, *args)
- return tuple(map(list, zip(*map_results)))
-
-
-def unmap(data, count, inds, fill=0):
- """Unmap a subset of item (data) back to the original set of items (of size
- count)"""
- if data.dim() == 1:
- ret = data.new_full((count, ), fill)
- ret[inds.type(torch.bool)] = data
- else:
- new_size = (count, ) + data.size()[1:]
- ret = data.new_full(new_size, fill)
- ret[inds.type(torch.bool), :] = data
- return ret
-
-
-def mask2ndarray(mask):
- """Convert Mask to ndarray..
-
- Args:
- mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
- torch.Tensor or np.ndarray): The mask to be converted.
-
- Returns:
- np.ndarray: Ndarray mask of shape (n, h, w) that has been converted
- """
- if isinstance(mask, (BitmapMasks, PolygonMasks)):
- mask = mask.to_ndarray()
- elif isinstance(mask, torch.Tensor):
- mask = mask.detach().cpu().numpy()
- elif not isinstance(mask, np.ndarray):
- raise TypeError(f'Unsupported {type(mask)} data type')
- return mask
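A toy sketch of the two helpers above; the function and values are hypothetical:

```py
import torch

def scale_and_shift(x, y, factor=1):
    return x * factor, y + factor

# multi_apply groups the per-input results into one list per output.
scaled, shifted = multi_apply(scale_and_shift, [1, 2, 3], [10, 20, 30], factor=2)
# scaled == [2, 4, 6], shifted == [12, 22, 32]

# unmap scatters a subset of values back into a tensor of the original size.
data = torch.tensor([1.0, 2.0])
inds = torch.tensor([0, 1, 0, 1])        # positions 1 and 3 were kept
full = unmap(data, count=4, inds=inds)   # tensor([0., 1., 0., 2.])
```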
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/necks/yolo_neck.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/necks/yolo_neck.py
deleted file mode 100644
index c2f9b9ef3859796c284c16ad1a92fe41ecbed613..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/necks/yolo_neck.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright (c) 2019 Western Digital Corporation or its affiliates.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule
-
-from ..builder import NECKS
-
-
-class DetectionBlock(nn.Module):
- """Detection block in YOLO neck.
-
- Let out_channels = n; the DetectionBlock then contains
- six ConvLayers, one Conv2D layer and one YoloLayer.
- The first six ConvLayers are arranged as follows:
- 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.
- The Conv2D layer is 1x1x255.
- Some blocks have a branch after the fifth ConvLayer.
- The number of input channels (in_channels) is arbitrary.
-
- Args:
- in_channels (int): The number of input channels.
- out_channels (int): The number of output channels.
- conv_cfg (dict): Config dict for convolution layer. Default: None.
- norm_cfg (dict): Dictionary to construct and config norm layer.
- Default: dict(type='BN', requires_grad=True)
- act_cfg (dict): Config dict for activation layer.
- Default: dict(type='LeakyReLU', negative_slope=0.1).
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- conv_cfg=None,
- norm_cfg=dict(type='BN', requires_grad=True),
- act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
- super(DetectionBlock, self).__init__()
- double_out_channels = out_channels * 2
-
- # shortcut
- cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
- self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)
- self.conv2 = ConvModule(
- out_channels, double_out_channels, 3, padding=1, **cfg)
- self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)
- self.conv4 = ConvModule(
- out_channels, double_out_channels, 3, padding=1, **cfg)
- self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)
-
- def forward(self, x):
- tmp = self.conv1(x)
- tmp = self.conv2(tmp)
- tmp = self.conv3(tmp)
- tmp = self.conv4(tmp)
- out = self.conv5(tmp)
- return out
-
-
-@NECKS.register_module()
-class YOLOV3Neck(nn.Module):
- """The neck of YOLOV3.
-
- It can be treated as a simplified version of FPN. It
- will take the result from Darknet backbone and do some upsampling and
- concatenation. It will finally output the detection result.
-
- Note:
- The input feats are ordered from low-lvl to high-lvl
- (``feats[-1]`` is the deepest map), but YOLOV3Neck
- processes them in reversed order, i.e. starting from
- the high-lvl map.
-
- Args:
- num_scales (int): The number of scales / stages.
- in_channels (list[int]): The number of input channels per scale.
- out_channels (list[int]): The number of output channels per scale.
- conv_cfg (dict): Config dict for convolution layer. Default: None.
- norm_cfg (dict): Dictionary to construct and config norm layer.
- Default: dict(type='BN', requires_grad=True)
- act_cfg (dict): Config dict for activation layer.
- Default: dict(type='LeakyReLU', negative_slope=0.1).
- """
-
- def __init__(self,
- num_scales,
- in_channels,
- out_channels,
- conv_cfg=None,
- norm_cfg=dict(type='BN', requires_grad=True),
- act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
- super(YOLOV3Neck, self).__init__()
- assert (num_scales == len(in_channels) == len(out_channels))
- self.num_scales = num_scales
- self.in_channels = in_channels
- self.out_channels = out_channels
-
- # shortcut
- cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
-
- # To support arbitrary scales, the code looks awful, but it works.
- # Better solution is welcomed.
- self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
- for i in range(1, self.num_scales):
- in_c, out_c = self.in_channels[i], self.out_channels[i]
- self.add_module(f'conv{i}', ConvModule(in_c, out_c, 1, **cfg))
- # in_c + out_c : High-lvl feats will be cat with low-lvl feats
- self.add_module(f'detect{i+1}',
- DetectionBlock(in_c + out_c, out_c, **cfg))
-
- def forward(self, feats):
- assert len(feats) == self.num_scales
-
- # processed from bottom (high-lvl) to top (low-lvl)
- outs = []
- out = self.detect1(feats[-1])
- outs.append(out)
-
- for i, x in enumerate(reversed(feats[:-1])):
- conv = getattr(self, f'conv{i+1}')
- tmp = conv(out)
-
- # Cat with low-lvl feats
- tmp = F.interpolate(tmp, scale_factor=2)
- tmp = torch.cat((tmp, x), 1)
-
- detect = getattr(self, f'detect{i+2}')
- out = detect(tmp)
- outs.append(out)
-
- return tuple(outs)
-
- def init_weights(self):
- """Initialize the weights of module."""
- # init is done in ConvModule
- pass
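A shape sketch for the neck above; the channel and resolution numbers are the usual Darknet-53/YOLOv3 ones and are only illustrative (mmcv's `ConvModule` must be importable):

```py
import torch

neck = YOLOV3Neck(num_scales=3,
                  in_channels=[1024, 512, 256],
                  out_channels=[512, 256, 128])

# feats run from low-lvl to high-lvl, so feats[-1] is the deepest map.
feats = (torch.randn(1, 256, 52, 52),
         torch.randn(1, 512, 26, 26),
         torch.randn(1, 1024, 13, 13))

outs = neck(feats)
print([tuple(o.shape) for o in outs])
# [(1, 512, 13, 13), (1, 256, 26, 26), (1, 128, 52, 52)]
```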
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/utils/weight_init.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/utils/weight_init.py
deleted file mode 100644
index 287a1d0bffe26e023029d48634d9b761deda7ba4..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/utils/weight_init.py
+++ /dev/null
@@ -1,684 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import copy
-import math
-import warnings
-
-import numpy as np
-import torch
-import torch.nn as nn
-from torch import Tensor
-
-from annotator.uniformer.mmcv.utils import Registry, build_from_cfg, get_logger, print_log
-
-INITIALIZERS = Registry('initializer')
-
-
-def update_init_info(module, init_info):
- """Update the `_params_init_info` in the module if the value of parameters
- are changed.
-
- Args:
- module (obj:`nn.Module`): The module of PyTorch with a user-defined
- attribute `_params_init_info` which records the initialization
- information.
- init_info (str): The string that describes the initialization.
- """
- assert hasattr(
- module,
- '_params_init_info'), f'Can not find `_params_init_info` in {module}'
- for name, param in module.named_parameters():
-
- assert param in module._params_init_info, (
- f'Find a new :obj:`Parameter` '
- f'named `{name}` during executing the '
- f'`init_weights` of '
- f'`{module.__class__.__name__}`. '
- f'Please do not add or '
- f'replace parameters while executing '
- f'the `init_weights`. ')
-
- # The parameter has been changed during executing the
- # `init_weights` of module
- mean_value = param.data.mean()
- if module._params_init_info[param]['tmp_mean_value'] != mean_value:
- module._params_init_info[param]['init_info'] = init_info
- module._params_init_info[param]['tmp_mean_value'] = mean_value
-
-
-def constant_init(module, val, bias=0):
- if hasattr(module, 'weight') and module.weight is not None:
- nn.init.constant_(module.weight, val)
- if hasattr(module, 'bias') and module.bias is not None:
- nn.init.constant_(module.bias, bias)
-
-
-def xavier_init(module, gain=1, bias=0, distribution='normal'):
- assert distribution in ['uniform', 'normal']
- if hasattr(module, 'weight') and module.weight is not None:
- if distribution == 'uniform':
- nn.init.xavier_uniform_(module.weight, gain=gain)
- else:
- nn.init.xavier_normal_(module.weight, gain=gain)
- if hasattr(module, 'bias') and module.bias is not None:
- nn.init.constant_(module.bias, bias)
-
-
-def normal_init(module, mean=0, std=1, bias=0):
- if hasattr(module, 'weight') and module.weight is not None:
- nn.init.normal_(module.weight, mean, std)
- if hasattr(module, 'bias') and module.bias is not None:
- nn.init.constant_(module.bias, bias)
-
-
-def trunc_normal_init(module: nn.Module,
- mean: float = 0,
- std: float = 1,
- a: float = -2,
- b: float = 2,
- bias: float = 0) -> None:
- if hasattr(module, 'weight') and module.weight is not None:
- trunc_normal_(module.weight, mean, std, a, b) # type: ignore
- if hasattr(module, 'bias') and module.bias is not None:
- nn.init.constant_(module.bias, bias) # type: ignore
-
-
-def uniform_init(module, a=0, b=1, bias=0):
- if hasattr(module, 'weight') and module.weight is not None:
- nn.init.uniform_(module.weight, a, b)
- if hasattr(module, 'bias') and module.bias is not None:
- nn.init.constant_(module.bias, bias)
-
-
-def kaiming_init(module,
- a=0,
- mode='fan_out',
- nonlinearity='relu',
- bias=0,
- distribution='normal'):
- assert distribution in ['uniform', 'normal']
- if hasattr(module, 'weight') and module.weight is not None:
- if distribution == 'uniform':
- nn.init.kaiming_uniform_(
- module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
- else:
- nn.init.kaiming_normal_(
- module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
- if hasattr(module, 'bias') and module.bias is not None:
- nn.init.constant_(module.bias, bias)
-
-
-def caffe2_xavier_init(module, bias=0):
- # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
- # Acknowledgment to FAIR's internal code
- kaiming_init(
- module,
- a=1,
- mode='fan_in',
- nonlinearity='leaky_relu',
- bias=bias,
- distribution='uniform')
-
-
-def bias_init_with_prob(prior_prob):
- """initialize conv/fc bias value according to a given probability value."""
- bias_init = float(-np.log((1 - prior_prob) / prior_prob))
- return bias_init
-
-
-def _get_bases_name(m):
- return [b.__name__ for b in m.__class__.__bases__]
-
-
-class BaseInit(object):
-
- def __init__(self, *, bias=0, bias_prob=None, layer=None):
- self.wholemodule = False
- if not isinstance(bias, (int, float)):
- raise TypeError(f'bias must be a number, but got a {type(bias)}')
-
- if bias_prob is not None:
- if not isinstance(bias_prob, float):
- raise TypeError(f'bias_prob type must be float, \
- but got {type(bias_prob)}')
-
- if layer is not None:
- if not isinstance(layer, (str, list)):
- raise TypeError(f'layer must be a str or a list of str, \
- but got a {type(layer)}')
- else:
- layer = []
-
- if bias_prob is not None:
- self.bias = bias_init_with_prob(bias_prob)
- else:
- self.bias = bias
- self.layer = [layer] if isinstance(layer, str) else layer
-
- def _get_init_info(self):
- info = f'{self.__class__.__name__}, bias={self.bias}'
- return info
-
-
-@INITIALIZERS.register_module(name='Constant')
-class ConstantInit(BaseInit):
- """Initialize module parameters with constant values.
-
- Args:
- val (int | float): the value to fill the weights in the module with
- bias (int | float): the value to fill the bias. Defaults to 0.
- bias_prob (float, optional): the probability for bias initialization.
- Defaults to None.
- layer (str | list[str], optional): the layer will be initialized.
- Defaults to None.
- """
-
- def __init__(self, val, **kwargs):
- super().__init__(**kwargs)
- self.val = val
-
- def __call__(self, module):
-
- def init(m):
- if self.wholemodule:
- constant_init(m, self.val, self.bias)
- else:
- layername = m.__class__.__name__
- basesname = _get_bases_name(m)
- if len(set(self.layer) & set([layername] + basesname)):
- constant_init(m, self.val, self.bias)
-
- module.apply(init)
- if hasattr(module, '_params_init_info'):
- update_init_info(module, init_info=self._get_init_info())
-
- def _get_init_info(self):
- info = f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
- return info
-
-
-@INITIALIZERS.register_module(name='Xavier')
-class XavierInit(BaseInit):
- r"""Initialize module parameters with values according to the method
- described in `Understanding the difficulty of training deep feedforward
- neural networks - Glorot, X. & Bengio, Y. (2010).
- `_
-
- Args:
- gain (int | float): an optional scaling factor. Defaults to 1.
- bias (int | float): the value to fill the bias. Defaults to 0.
- bias_prob (float, optional): the probability for bias initialization.
- Defaults to None.
- distribution (str): distribution either be ``'normal'``
- or ``'uniform'``. Defaults to ``'normal'``.
- layer (str | list[str], optional): the layer will be initialized.
- Defaults to None.
- """
-
- def __init__(self, gain=1, distribution='normal', **kwargs):
- super().__init__(**kwargs)
- self.gain = gain
- self.distribution = distribution
-
- def __call__(self, module):
-
- def init(m):
- if self.wholemodule:
- xavier_init(m, self.gain, self.bias, self.distribution)
- else:
- layername = m.__class__.__name__
- basesname = _get_bases_name(m)
- if len(set(self.layer) & set([layername] + basesname)):
- xavier_init(m, self.gain, self.bias, self.distribution)
-
- module.apply(init)
- if hasattr(module, '_params_init_info'):
- update_init_info(module, init_info=self._get_init_info())
-
- def _get_init_info(self):
- info = f'{self.__class__.__name__}: gain={self.gain}, ' \
- f'distribution={self.distribution}, bias={self.bias}'
- return info
-
-
-@INITIALIZERS.register_module(name='Normal')
-class NormalInit(BaseInit):
- r"""Initialize module parameters with the values drawn from the normal
- distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
-
- Args:
- mean (int | float): the mean of the normal distribution. Defaults to 0.
- std (int | float): the standard deviation of the normal distribution.
- Defaults to 1.
- bias (int | float): the value to fill the bias. Defaults to 0.
- bias_prob (float, optional): the probability for bias initialization.
- Defaults to None.
- layer (str | list[str], optional): the layer will be initialized.
- Defaults to None.
-
- """
-
- def __init__(self, mean=0, std=1, **kwargs):
- super().__init__(**kwargs)
- self.mean = mean
- self.std = std
-
- def __call__(self, module):
-
- def init(m):
- if self.wholemodule:
- normal_init(m, self.mean, self.std, self.bias)
- else:
- layername = m.__class__.__name__
- basesname = _get_bases_name(m)
- if len(set(self.layer) & set([layername] + basesname)):
- normal_init(m, self.mean, self.std, self.bias)
-
- module.apply(init)
- if hasattr(module, '_params_init_info'):
- update_init_info(module, init_info=self._get_init_info())
-
- def _get_init_info(self):
- info = f'{self.__class__.__name__}: mean={self.mean},' \
- f' std={self.std}, bias={self.bias}'
- return info
-
-
-@INITIALIZERS.register_module(name='TruncNormal')
-class TruncNormalInit(BaseInit):
- r"""Initialize module parameters with the values drawn from the normal
- distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values
- outside :math:`[a, b]`.
-
- Args:
- mean (float): the mean of the normal distribution. Defaults to 0.
- std (float): the standard deviation of the normal distribution.
- Defaults to 1.
- a (float): The minimum cutoff value.
- b (float): The maximum cutoff value.
- bias (float): the value to fill the bias. Defaults to 0.
- bias_prob (float, optional): the probability for bias initialization.
- Defaults to None.
- layer (str | list[str], optional): the layer will be initialized.
- Defaults to None.
-
- """
-
- def __init__(self,
- mean: float = 0,
- std: float = 1,
- a: float = -2,
- b: float = 2,
- **kwargs) -> None:
- super().__init__(**kwargs)
- self.mean = mean
- self.std = std
- self.a = a
- self.b = b
-
- def __call__(self, module: nn.Module) -> None:
-
- def init(m):
- if self.wholemodule:
- trunc_normal_init(m, self.mean, self.std, self.a, self.b,
- self.bias)
- else:
- layername = m.__class__.__name__
- basesname = _get_bases_name(m)
- if len(set(self.layer) & set([layername] + basesname)):
- trunc_normal_init(m, self.mean, self.std, self.a, self.b,
- self.bias)
-
- module.apply(init)
- if hasattr(module, '_params_init_info'):
- update_init_info(module, init_info=self._get_init_info())
-
- def _get_init_info(self):
- info = f'{self.__class__.__name__}: a={self.a}, b={self.b},' \
- f' mean={self.mean}, std={self.std}, bias={self.bias}'
- return info
-
-
-@INITIALIZERS.register_module(name='Uniform')
-class UniformInit(BaseInit):
- r"""Initialize module parameters with values drawn from the uniform
- distribution :math:`\mathcal{U}(a, b)`.
-
- Args:
- a (int | float): the lower bound of the uniform distribution.
- Defaults to 0.
- b (int | float): the upper bound of the uniform distribution.
- Defaults to 1.
- bias (int | float): the value to fill the bias. Defaults to 0.
- bias_prob (float, optional): the probability for bias initialization.
- Defaults to None.
- layer (str | list[str], optional): the layer will be initialized.
- Defaults to None.
- """
-
- def __init__(self, a=0, b=1, **kwargs):
- super().__init__(**kwargs)
- self.a = a
- self.b = b
-
- def __call__(self, module):
-
- def init(m):
- if self.wholemodule:
- uniform_init(m, self.a, self.b, self.bias)
- else:
- layername = m.__class__.__name__
- basesname = _get_bases_name(m)
- if len(set(self.layer) & set([layername] + basesname)):
- uniform_init(m, self.a, self.b, self.bias)
-
- module.apply(init)
- if hasattr(module, '_params_init_info'):
- update_init_info(module, init_info=self._get_init_info())
-
- def _get_init_info(self):
- info = f'{self.__class__.__name__}: a={self.a},' \
- f' b={self.b}, bias={self.bias}'
- return info
-
-
-@INITIALIZERS.register_module(name='Kaiming')
-class KaimingInit(BaseInit):
- r"""Initialize module parameters with the values according to the method
- described in `Delving deep into rectifiers: Surpassing human-level
- performance on ImageNet classification - He, K. et al. (2015).
- `_
-
- Args:
- a (int | float): the negative slope of the rectifier used after this
- layer (only used with ``'leaky_relu'``). Defaults to 0.
- mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
- ``'fan_in'`` preserves the magnitude of the variance of the weights
- in the forward pass. Choosing ``'fan_out'`` preserves the
- magnitudes in the backwards pass. Defaults to ``'fan_out'``.
- nonlinearity (str): the non-linear function (`nn.functional` name),
- recommended to use only with ``'relu'`` or ``'leaky_relu'`` .
- Defaults to 'relu'.
- bias (int | float): the value to fill the bias. Defaults to 0.
- bias_prob (float, optional): the probability for bias initialization.
- Defaults to None.
- distribution (str): distribution either be ``'normal'`` or
- ``'uniform'``. Defaults to ``'normal'``.
- layer (str | list[str], optional): the layer will be initialized.
- Defaults to None.
- """
-
- def __init__(self,
- a=0,
- mode='fan_out',
- nonlinearity='relu',
- distribution='normal',
- **kwargs):
- super().__init__(**kwargs)
- self.a = a
- self.mode = mode
- self.nonlinearity = nonlinearity
- self.distribution = distribution
-
- def __call__(self, module):
-
- def init(m):
- if self.wholemodule:
- kaiming_init(m, self.a, self.mode, self.nonlinearity,
- self.bias, self.distribution)
- else:
- layername = m.__class__.__name__
- basesname = _get_bases_name(m)
- if len(set(self.layer) & set([layername] + basesname)):
- kaiming_init(m, self.a, self.mode, self.nonlinearity,
- self.bias, self.distribution)
-
- module.apply(init)
- if hasattr(module, '_params_init_info'):
- update_init_info(module, init_info=self._get_init_info())
-
- def _get_init_info(self):
- info = f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, ' \
- f'nonlinearity={self.nonlinearity}, ' \
- f'distribution={self.distribution}, bias={self.bias}'
- return info
-
-
-@INITIALIZERS.register_module(name='Caffe2Xavier')
-class Caffe2XavierInit(KaimingInit):
- # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
- # Acknowledgment to FAIR's internal code
- def __init__(self, **kwargs):
- super().__init__(
- a=1,
- mode='fan_in',
- nonlinearity='leaky_relu',
- distribution='uniform',
- **kwargs)
-
- def __call__(self, module):
- super().__call__(module)
-
-
-@INITIALIZERS.register_module(name='Pretrained')
-class PretrainedInit(object):
- """Initialize module by loading a pretrained model.
-
- Args:
- checkpoint (str): the checkpoint file of the pretrained model should
- be load.
- prefix (str, optional): the prefix of a sub-module in the pretrained
- model. it is for loading a part of the pretrained model to
- initialize. For example, if we would like to only load the
- backbone of a detector model, we can set ``prefix='backbone.'``.
- Defaults to None.
- map_location (str): map tensors into proper locations.
- """
-
- def __init__(self, checkpoint, prefix=None, map_location=None):
- self.checkpoint = checkpoint
- self.prefix = prefix
- self.map_location = map_location
-
- def __call__(self, module):
- from annotator.uniformer.mmcv.runner import (_load_checkpoint_with_prefix, load_checkpoint,
- load_state_dict)
- logger = get_logger('mmcv')
- if self.prefix is None:
- print_log(f'load model from: {self.checkpoint}', logger=logger)
- load_checkpoint(
- module,
- self.checkpoint,
- map_location=self.map_location,
- strict=False,
- logger=logger)
- else:
- print_log(
- f'load {self.prefix} in model from: {self.checkpoint}',
- logger=logger)
- state_dict = _load_checkpoint_with_prefix(
- self.prefix, self.checkpoint, map_location=self.map_location)
- load_state_dict(module, state_dict, strict=False, logger=logger)
-
- if hasattr(module, '_params_init_info'):
- update_init_info(module, init_info=self._get_init_info())
-
- def _get_init_info(self):
- info = f'{self.__class__.__name__}: load from {self.checkpoint}'
- return info
-
-
-def _initialize(module, cfg, wholemodule=False):
- func = build_from_cfg(cfg, INITIALIZERS)
- # wholemodule flag is for override mode, there is no layer key in override
- # and initializer will give init values for the whole module with the name
- # in override.
- func.wholemodule = wholemodule
- func(module)
-
-
-def _initialize_override(module, override, cfg):
- if not isinstance(override, (dict, list)):
- raise TypeError(f'override must be a dict or a list of dict, \
- but got {type(override)}')
-
- override = [override] if isinstance(override, dict) else override
-
- for override_ in override:
-
- cp_override = copy.deepcopy(override_)
- name = cp_override.pop('name', None)
- if name is None:
- raise ValueError('`override` must contain the key "name", '
- f'but got {cp_override}')
- # if override only has name key, it means use args in init_cfg
- if not cp_override:
- cp_override.update(cfg)
- # if override has name key and other args except type key, it will
- # raise error
- elif 'type' not in cp_override.keys():
- raise ValueError(
- f'`override` needs a "type" key, but got {cp_override}')
-
- if hasattr(module, name):
- _initialize(getattr(module, name), cp_override, wholemodule=True)
- else:
- raise RuntimeError(f'module did not have attribute {name}, '
- f'but init_cfg is {cp_override}.')
-
-
-def initialize(module, init_cfg):
- """Initialize a module.
-
- Args:
- module (``torch.nn.Module``): the module will be initialized.
- init_cfg (dict | list[dict]): initialization configuration dict to
- define initializer. OpenMMLab has implemented 6 initializers
- including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
- ``Kaiming``, and ``Pretrained``.
- Example:
- >>> module = nn.Linear(2, 3, bias=True)
- >>> init_cfg = dict(type='Constant', layer='Linear', val =1 , bias =2)
- >>> initialize(module, init_cfg)
-
- >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))
- >>> # define key ``'layer'`` for initializing layer with different
- >>> # configuration
- >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
- dict(type='Constant', layer='Linear', val=2)]
- >>> initialize(module, init_cfg)
-
- >>> # define key``'override'`` to initialize some specific part in
- >>> # module
- >>> class FooNet(nn.Module):
- >>> def __init__(self):
- >>> super().__init__()
- >>> self.feat = nn.Conv2d(3, 16, 3)
- >>> self.reg = nn.Conv2d(16, 10, 3)
- >>> self.cls = nn.Conv2d(16, 5, 3)
- >>> model = FooNet()
- >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
- >>> override=dict(type='Constant', name='reg', val=3, bias=4))
- >>> initialize(model, init_cfg)
-
- >>> model = ResNet(depth=50)
- >>> # Initialize weights with the pretrained model.
- >>> init_cfg = dict(type='Pretrained',
- checkpoint='torchvision://resnet50')
- >>> initialize(model, init_cfg)
-
- >>> # Initialize weights of a sub-module with the specific part of
- >>> # a pretrained model by using "prefix".
- >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
- >>> 'retinanet_r50_fpn_1x_coco/'\
- >>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
- >>> init_cfg = dict(type='Pretrained',
- checkpoint=url, prefix='backbone.')
- """
- if not isinstance(init_cfg, (dict, list)):
- raise TypeError(f'init_cfg must be a dict or a list of dict, \
- but got {type(init_cfg)}')
-
- if isinstance(init_cfg, dict):
- init_cfg = [init_cfg]
-
- for cfg in init_cfg:
- # should deeply copy the original config because cfg may be used by
- # other modules, e.g., one init_cfg shared by multiple bottleneck
- # blocks, the expected cfg will be changed after pop and will change
- # the initialization behavior of other modules
- cp_cfg = copy.deepcopy(cfg)
- override = cp_cfg.pop('override', None)
- _initialize(module, cp_cfg)
-
- if override is not None:
- cp_cfg.pop('layer', None)
- _initialize_override(module, override, cp_cfg)
- else:
- # All attributes in module have same initialization.
- pass
-
-
-def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,
- b: float) -> Tensor:
- # Method based on
- # https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
- # Modified from
- # https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
- def norm_cdf(x):
- # Computes standard normal cumulative distribution function
- return (1. + math.erf(x / math.sqrt(2.))) / 2.
-
- if (mean < a - 2 * std) or (mean > b + 2 * std):
- warnings.warn(
- 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
- 'The distribution of values may be incorrect.',
- stacklevel=2)
-
- with torch.no_grad():
- # Values are generated by using a truncated uniform distribution and
- # then using the inverse CDF for the normal distribution.
- # Get upper and lower cdf values
- lower = norm_cdf((a - mean) / std)
- upper = norm_cdf((b - mean) / std)
-
- # Uniformly fill tensor with values from [lower, upper], then translate
- # to [2lower-1, 2upper-1].
- tensor.uniform_(2 * lower - 1, 2 * upper - 1)
-
- # Use inverse cdf transform for normal distribution to get truncated
- # standard normal
- tensor.erfinv_()
-
- # Transform to proper mean, std
- tensor.mul_(std * math.sqrt(2.))
- tensor.add_(mean)
-
- # Clamp to ensure it's in the proper range
- tensor.clamp_(min=a, max=b)
- return tensor
-
-
-def trunc_normal_(tensor: Tensor,
- mean: float = 0.,
- std: float = 1.,
- a: float = -2.,
- b: float = 2.) -> Tensor:
- r"""Fills the input Tensor with values drawn from a truncated
- normal distribution. The values are effectively drawn from the
- normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
- with values outside :math:`[a, b]` redrawn until they are within
- the bounds. The method used for generating the random values works
- best when :math:`a \leq \text{mean} \leq b`.
-
- Modified from
- https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
-
- Args:
- tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
- mean (float): the mean of the normal distribution.
- std (float): the standard deviation of the normal distribution.
- a (float): the minimum cutoff value.
- b (float): the maximum cutoff value.
- """
- return _no_grad_trunc_normal_(tensor, mean, std, a, b)
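Two quick sanity checks for the helpers above (values are arbitrary):

```py
import math
import torch

# trunc_normal_ fills the tensor in place and clamps samples to [a, b].
t = torch.empty(10000)
trunc_normal_(t, mean=0., std=1., a=-2., b=2.)
assert -2. <= float(t.min()) and float(t.max()) <= 2.

# bias_init_with_prob inverts the sigmoid: sigmoid(-log((1 - p) / p)) == p.
p = 0.01
b = bias_init_with_prob(p)               # roughly -4.595
assert abs(1. / (1. + math.exp(-b)) - p) < 1e-6
```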
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/utils/ext_loader.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/utils/ext_loader.py
deleted file mode 100644
index 08132d2c1b9a1c28880e4bab4d4fa1ba39d9d083..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/utils/ext_loader.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-import os
-import pkgutil
-import warnings
-from collections import namedtuple
-
-import torch
-
-if torch.__version__ != 'parrots':
-
- def load_ext(name, funcs):
- ext = importlib.import_module('mmcv.' + name)
- for fun in funcs:
- assert hasattr(ext, fun), f'{fun} miss in module {name}'
- return ext
-else:
- from parrots import extension
- from parrots.base import ParrotsException
-
- has_return_value_ops = [
- 'nms',
- 'softnms',
- 'nms_match',
- 'nms_rotated',
- 'top_pool_forward',
- 'top_pool_backward',
- 'bottom_pool_forward',
- 'bottom_pool_backward',
- 'left_pool_forward',
- 'left_pool_backward',
- 'right_pool_forward',
- 'right_pool_backward',
- 'fused_bias_leakyrelu',
- 'upfirdn2d',
- 'ms_deform_attn_forward',
- 'pixel_group',
- 'contour_expand',
- ]
-
- def get_fake_func(name, e):
-
- def fake_func(*args, **kwargs):
- warnings.warn(f'{name} is not supported in parrots now')
- raise e
-
- return fake_func
-
- def load_ext(name, funcs):
- ExtModule = namedtuple('ExtModule', funcs)
- ext_list = []
- lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- for fun in funcs:
- try:
- ext_fun = extension.load(fun, name, lib_dir=lib_root)
- except ParrotsException as e:
- if 'No element registered' not in e.message:
- warnings.warn(e.message)
- ext_fun = get_fake_func(fun, e)
- ext_list.append(ext_fun)
- else:
- if fun in has_return_value_ops:
- ext_list.append(ext_fun.op)
- else:
- ext_list.append(ext_fun.op_)
- return ExtModule(*ext_list)
-
-
-def check_ops_exist():
- ext_loader = pkgutil.find_loader('mmcv._ext')
- return ext_loader is not None
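A hedged sketch of how callers typically use this loader; the op names are illustrative and depend on which kernels the installed mmcv build actually compiled:

```py
# Fetch the compiled extension once at import time, then call its ops.
if check_ops_exist():
    ext_module = load_ext('_ext', ['nms', 'softnms'])
    # ext_module.nms(...) now dispatches to the compiled C++/CUDA kernel.
else:
    raise ImportError('mmcv._ext is not available; install/build mmcv-full '
                      'to use the compiled ops')
```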
diff --git a/spaces/Ryukijano/Real-CUGAN/upcunet_v3.py b/spaces/Ryukijano/Real-CUGAN/upcunet_v3.py
deleted file mode 100644
index f7919a6cc9efe3b8af73a73e30825a4c7d7d76da..0000000000000000000000000000000000000000
--- a/spaces/Ryukijano/Real-CUGAN/upcunet_v3.py
+++ /dev/null
@@ -1,714 +0,0 @@
-import torch
-from torch import nn as nn
-from torch.nn import functional as F
-import os, sys
-import numpy as np
-
-root_path = os.path.abspath('.')
-sys.path.append(root_path)
-
-
-class SEBlock(nn.Module):
- def __init__(self, in_channels, reduction=8, bias=False):
- super(SEBlock, self).__init__()
- self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias)
- self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias)
-
- def forward(self, x):
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half()
- else:
- x0 = torch.mean(x, dim=(2, 3), keepdim=True)
- x0 = self.conv1(x0)
- x0 = F.relu(x0, inplace=True)
- x0 = self.conv2(x0)
- x0 = torch.sigmoid(x0)
- x = torch.mul(x, x0)
- return x
-
- def forward_mean(self, x, x0):
- x0 = self.conv1(x0)
- x0 = F.relu(x0, inplace=True)
- x0 = self.conv2(x0)
- x0 = torch.sigmoid(x0)
- x = torch.mul(x, x0)
- return x
-
-
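`forward_mean` exists so that the tiled forward passes further down can feed globally pooled channel statistics instead of per-tile means; when the supplied statistics are the tensor's own, it matches `forward` exactly. A minimal check with random input:

```py
import torch

se = SEBlock(64)
x = torch.randn(1, 64, 32, 32)
x0 = torch.mean(x, dim=(2, 3), keepdim=True)   # the tile's own channel means
assert torch.allclose(se.forward(x), se.forward_mean(x, x0))
```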
-class UNetConv(nn.Module):
- def __init__(self, in_channels, mid_channels, out_channels, se):
- super(UNetConv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(in_channels, mid_channels, 3, 1, 0),
- nn.LeakyReLU(0.1, inplace=True),
- nn.Conv2d(mid_channels, out_channels, 3, 1, 0),
- nn.LeakyReLU(0.1, inplace=True),
- )
- if se:
- self.seblock = SEBlock(out_channels, reduction=8, bias=True)
- else:
- self.seblock = None
-
- def forward(self, x):
- z = self.conv(x)
- if self.seblock is not None:
- z = self.seblock(z)
- return z
-
-
-class UNet1(nn.Module):
- def __init__(self, in_channels, out_channels, deconv):
- super(UNet1, self).__init__()
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
- self.conv2 = UNetConv(64, 128, 64, se=True)
- self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
- self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
- if deconv:
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
- else:
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
- for m in self.modules():
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, nn.Linear):
- nn.init.normal_(m.weight, 0, 0.01)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2(x2)
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
- def forward_a(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2.conv(x2)
- return x1, x2
-
- def forward_b(self, x1, x2):
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
-
-class UNet1x3(nn.Module):
- def __init__(self, in_channels, out_channels, deconv):
- super(UNet1x3, self).__init__()
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
- self.conv2 = UNetConv(64, 128, 64, se=True)
- self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
- self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
- if deconv:
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2)
- else:
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
- for m in self.modules():
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, nn.Linear):
- nn.init.normal_(m.weight, 0, 0.01)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2(x2)
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
- def forward_a(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2.conv(x2)
- return x1, x2
-
- def forward_b(self, x1, x2):
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
-
-class UNet2(nn.Module):
- def __init__(self, in_channels, out_channels, deconv):
- super(UNet2, self).__init__()
-
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
- self.conv2 = UNetConv(64, 64, 128, se=True)
- self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0)
- self.conv3 = UNetConv(128, 256, 128, se=True)
- self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0)
- self.conv4 = UNetConv(128, 64, 64, se=True)
- self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
- self.conv5 = nn.Conv2d(64, 64, 3, 1, 0)
-
- if deconv:
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
- else:
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
- for m in self.modules():
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, nn.Linear):
- nn.init.normal_(m.weight, 0, 0.01)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2(x2)
-
- x3 = self.conv2_down(x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- x3 = self.conv3(x3)
- x3 = self.conv3_up(x3)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
- x2 = F.pad(x2, (-4, -4, -4, -4))
- x4 = self.conv4(x2 + x3)
- x4 = self.conv4_up(x4)
- x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-16, -16, -16, -16))
- x5 = self.conv5(x1 + x4)
- x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
- z = self.conv_bottom(x5)
- return z
-
- def forward_a(self, x):  # conv2/3/4 end with an SE block
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2.conv(x2)
- return x1, x2
-
- def forward_b(self, x2):  # conv2/3/4 end with an SE block
- x3 = self.conv2_down(x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- x3 = self.conv3.conv(x3)
- return x3
-
- def forward_c(self, x2, x3):  # conv2/3/4 end with an SE block
- x3 = self.conv3_up(x3)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
- x2 = F.pad(x2, (-4, -4, -4, -4))
- x4 = self.conv4.conv(x2 + x3)
- return x4
-
- def forward_d(self, x1, x4):  # conv2/3/4 end with an SE block
- x4 = self.conv4_up(x4)
- x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-16, -16, -16, -16))
- x5 = self.conv5(x1 + x4)
- x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
- z = self.conv_bottom(x5)
- return z
-
-
-class UpCunet2x(nn.Module):  # seamless tiling, lossless end to end
- def __init__(self, in_channels=3, out_channels=3):
- super(UpCunet2x, self).__init__()
- self.unet1 = UNet1(in_channels, out_channels, deconv=True)
- self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
- def forward(self, x, tile_mode): # 1.7G
- n, c, h0, w0 = x.shape
- if (tile_mode == 0):  # no tiling
- ph = ((h0 - 1) // 2 + 1) * 2
- pw = ((w0 - 1) // 2 + 1) * 2
- x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')  # padded size must be divisible by 2
- x = self.unet1.forward(x)
- x0 = self.unet2.forward(x)
- x1 = F.pad(x, (-20, -20, -20, -20))
- x = torch.add(x0, x1)
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2]
- return x
- elif (tile_mode == 1):  # halve the longer side
- if (w0 >= h0):
- crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2  # must be divisible by 2 after halving, so first round up to a multiple of 4
- crop_size_h = (h0 - 1) // 2 * 2 + 2  # divisible by 2
- else:
- crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2  # must be divisible by 2 after halving, so first round up to a multiple of 4
- crop_size_w = (w0 - 1) // 2 * 2 + 2  # divisible by 2
- crop_size = (crop_size_h, crop_size_w) # 6.6G
- elif (tile_mode == 2):  # halve both h and w
- crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2)  # 5.6G
- elif (tile_mode == 3):  # one third of both h and w
- crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3)  # 4.2G
- elif (tile_mode == 4):  # one quarter of both h and w
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
- x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')
- n, c, h, w = x.shape
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- n_patch = 0
- tmp_dict = {}
- opt_res_dict = {}
- for i in range(0, h - 36, crop_size[0]):
- tmp_dict[i] = {}
- for j in range(0, w - 36, crop_size[1]):
- x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36]
- n, c1, h1, w1 = x_crop.shape
- tmp0, x_crop = self.unet1.forward_a(x_crop)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- n_patch += 1
- tmp_dict[i][j] = (tmp0, x_crop)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- tmp0, x_crop = tmp_dict[i][j]
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
- se_mean1 /= n_patch
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
- tmp_x3 = self.unet2.forward_b(tmp_x2)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
- se_mean1 /= n_patch
- for i in range(0, h - 36, crop_size[0]):
- opt_res_dict[i] = {}
- for j in range(0, w - 36, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
- x_crop = torch.add(x0, x1) # x0 is the final output of unet2
- opt_res_dict[i][j] = x_crop
- del tmp_dict
- torch.cuda.empty_cache()
- res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device)
- if ("Half" in x.type()):
- res = res.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j]
- del opt_res_dict
- torch.cuda.empty_cache()
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2]
- return res #
-
-
-class UpCunet3x(nn.Module): # seamless tiling, lossless end to end
- def __init__(self, in_channels=3, out_channels=3):
- super(UpCunet3x, self).__init__()
- self.unet1 = UNet1x3(in_channels, out_channels, deconv=True)
- self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
- def forward(self, x, tile_mode): # 1.7G
- n, c, h0, w0 = x.shape
- if (tile_mode == 0): # no tiling
- ph = ((h0 - 1) // 4 + 1) * 4
- pw = ((w0 - 1) // 4 + 1) * 4
- x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') # padded size must be divisible by 4
- x = self.unet1.forward(x)
- x0 = self.unet2.forward(x)
- x1 = F.pad(x, (-20, -20, -20, -20))
- x = torch.add(x0, x1)
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3]
- return x
- elif (tile_mode == 1): # halve the longer side
- if (w0 >= h0):
- crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2 # must be divisible by 8 so the half is divisible by 4
- crop_size_h = (h0 - 1) // 4 * 4 + 4 # divisible by 4
- else:
- crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2 # must be divisible by 8 so the half is divisible by 4
- crop_size_w = (w0 - 1) // 4 * 4 + 4 # divisible by 4
- crop_size = (crop_size_h, crop_size_w) # 6.6G
- elif (tile_mode == 2): # halve both h and w
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2) # 5.6G
- elif (tile_mode == 3): # one third of both h and w
- crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3) # 4.2G
- elif (tile_mode == 4): # one quarter of both h and w
- crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4) # 3.7G
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
- x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')
- n, c, h, w = x.shape
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- n_patch = 0
- tmp_dict = {}
- opt_res_dict = {}
- for i in range(0, h - 28, crop_size[0]):
- tmp_dict[i] = {}
- for j in range(0, w - 28, crop_size[1]):
- x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28]
- n, c1, h1, w1 = x_crop.shape
- tmp0, x_crop = self.unet1.forward_a(x_crop)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- n_patch += 1
- tmp_dict[i][j] = (tmp0, x_crop)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- tmp0, x_crop = tmp_dict[i][j]
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
- se_mean1 /= n_patch
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
- tmp_x3 = self.unet2.forward_b(tmp_x2)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
- se_mean1 /= n_patch
- for i in range(0, h - 28, crop_size[0]):
- opt_res_dict[i] = {}
- for j in range(0, w - 28, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
- x_crop = torch.add(x0, x1) # x0 is the final output of unet2
- opt_res_dict[i][j] = x_crop #
- del tmp_dict
- torch.cuda.empty_cache()
- res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device)
- if ("Half" in x.type()):
- res = res.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j]
- del opt_res_dict
- torch.cuda.empty_cache()
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3]
- return res
-
-
-class UpCunet4x(nn.Module): # seamless tiling, lossless end to end
- def __init__(self, in_channels=3, out_channels=3):
- super(UpCunet4x, self).__init__()
- self.unet1 = UNet1(in_channels, 64, deconv=True)
- self.unet2 = UNet2(64, 64, deconv=False)
- self.ps = nn.PixelShuffle(2)
- self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True)
-
- def forward(self, x, tile_mode):
- n, c, h0, w0 = x.shape
- x00 = x
- if (tile_mode == 0): # no tiling
- ph = ((h0 - 1) // 2 + 1) * 2
- pw = ((w0 - 1) // 2 + 1) * 2
- x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') # padded size must be divisible by 2
- x = self.unet1.forward(x)
- x0 = self.unet2.forward(x)
- x1 = F.pad(x, (-20, -20, -20, -20))
- x = torch.add(x0, x1)
- x = self.conv_final(x)
- x = F.pad(x, (-1, -1, -1, -1))
- x = self.ps(x)
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4]
- x += F.interpolate(x00, scale_factor=4, mode='nearest')
- return x
- elif (tile_mode == 1): # halve the longer side
- if (w0 >= h0):
- crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # must be divisible by 4 so the half is divisible by 2
- crop_size_h = (h0 - 1) // 2 * 2 + 2 # divisible by 2
- else:
- crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # must be divisible by 4 so the half is divisible by 2
- crop_size_w = (w0 - 1) // 2 * 2 + 2 # divisible by 2
- crop_size = (crop_size_h, crop_size_w) # 6.6G
- elif (tile_mode == 2): # halve both h and w
- crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G
- elif (tile_mode == 3): # one third of both h and w
- crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.1G
- elif (tile_mode == 4): # one quarter of both h and w
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
- x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect')
- n, c, h, w = x.shape
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- n_patch = 0
- tmp_dict = {}
- opt_res_dict = {}
- for i in range(0, h - 38, crop_size[0]):
- tmp_dict[i] = {}
- for j in range(0, w - 38, crop_size[1]):
- x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38]
- n, c1, h1, w1 = x_crop.shape
- tmp0, x_crop = self.unet1.forward_a(x_crop)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- n_patch += 1
- tmp_dict[i][j] = (tmp0, x_crop)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- tmp0, x_crop = tmp_dict[i][j]
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
- se_mean1 /= n_patch
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
- tmp_x3 = self.unet2.forward_b(tmp_x2)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
- se_mean1 /= n_patch
- for i in range(0, h - 38, crop_size[0]):
- opt_res_dict[i] = {}
- for j in range(0, w - 38, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
- x_crop = torch.add(x0, x1) # x0 is the final output of unet2
- x_crop = self.conv_final(x_crop)
- x_crop = F.pad(x_crop, (-1, -1, -1, -1))
- x_crop = self.ps(x_crop)
- opt_res_dict[i][j] = x_crop
- del tmp_dict
- torch.cuda.empty_cache()
- res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device)
- if ("Half" in x.type()):
- res = res.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape)
- res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j]
- del opt_res_dict
- torch.cuda.empty_cache()
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4]
- res += F.interpolate(x00, scale_factor=4, mode='nearest')
- return res #
-
-
-class RealWaifuUpScaler(object):
- def __init__(self, scale, weight_path, half, device):
- weight = torch.load(weight_path, map_location="cpu")
- self.model = eval("UpCunet%sx" % scale)()
- if (half == True):
- self.model = self.model.half().to(device)
- else:
- self.model = self.model.to(device)
- self.model.load_state_dict(weight, strict=True)
- self.model.eval()
- self.half = half
- self.device = device
-
- def np2tensor(self, np_frame):
- if (self.half == False):
- return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255
- else:
- return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255
-
- def tensor2np(self, tensor):
- if (self.half == False):
- return (
- np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0)))
- else:
- return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(),
- (1, 2, 0)))
-
- def __call__(self, frame, tile_mode):
- with torch.no_grad():
- tensor = self.np2tensor(frame)
- result = self.tensor2np(self.model(tensor, tile_mode))
- return result
-
-
-if __name__ == "__main__":
- ###########inference_img
- import time, cv2, sys
- from time import time as ttime
-
- for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3),
- ("weights_v3/up4x-latest-denoise3x.pth", 4)]:
- for tile_mode in [0, 1, 2, 3, 4]:
- upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0")
- input_dir = "%s/input_dir1" % root_path
- output_dir = "%s/opt-dir-all-test" % root_path
- os.makedirs(output_dir, exist_ok=True)
- for name in os.listdir(input_dir):
- print(name)
- tmp = name.split(".")
- inp_path = os.path.join(input_dir, name)
- suffix = tmp[-1]
- prefix = ".".join(tmp[:-1])
- tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
- print(inp_path, tmp_path)
- # work around non-ASCII (e.g. Chinese) paths by linking to a temporary ASCII path
- # os.link(inp_path, tmp_path) # use a hard link on Windows
- os.symlink(inp_path, tmp_path) # use a symlink on Linux
- frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]]
- t0 = ttime()
- result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1]
- t1 = ttime()
- print(prefix, "done", t1 - t0)
- tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
- cv2.imwrite(tmp_opt_path, result)
- n = 0
- while (1):
- if (n == 0):
- suffix = "_%sx_tile%s.png" % (scale, tile_mode)
- else:
- suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n) #
- if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False):
- break
- else:
- n += 1
- final_opt_path = os.path.join(output_dir, prefix + suffix)
- os.rename(tmp_opt_path, final_opt_path)
- os.remove(tmp_path)
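
The deleted upscaler above cuts the input into overlapping tiles whose sizes are rounded so that every tile stays divisible by the network stride, then reflect-pads the whole image (an 18 px border for the 2x model) before cropping tiles with that extra context. The following is a small stand-alone sketch of that arithmetic for `UpCunet2x` only; the function name is mine and is not part of the deleted file.

```py
def cunet2x_tile_geometry(h0, w0, tile_mode):
    """Reproduce the crop/pad sizes computed in UpCunet2x.forward for tile_mode >= 1."""
    if tile_mode == 1:  # halve the longer side; the half must stay divisible by 2
        if w0 >= h0:
            crop_h = (h0 - 1) // 2 * 2 + 2
            crop_w = ((w0 - 1) // 4 * 4 + 4) // 2
        else:
            crop_h = ((h0 - 1) // 4 * 4 + 4) // 2
            crop_w = (w0 - 1) // 2 * 2 + 2
    else:  # tile_mode 2/3/4: split both sides into that many tiles
        k = tile_mode
        crop_h = ((h0 - 1) // (2 * k) * (2 * k) + 2 * k) // k
        crop_w = ((w0 - 1) // (2 * k) * (2 * k) + 2 * k) // k
    # the image is then padded up to a multiple of the tile size (plus the 18 px reflect border)
    ph = ((h0 - 1) // crop_h + 1) * crop_h
    pw = ((w0 - 1) // crop_w + 1) * crop_w
    return (crop_h, crop_w), (ph, pw)

print(cunet2x_tile_geometry(720, 1280, tile_mode=2))  # ((360, 640), (720, 1280))
```
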
diff --git a/spaces/SalML/TableTransformer2CSV/README.md b/spaces/SalML/TableTransformer2CSV/README.md
deleted file mode 100644
index dd4517033f76e7f1ab1fd341e196a02edf7181d9..0000000000000000000000000000000000000000
--- a/spaces/SalML/TableTransformer2CSV/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Image2Table
-emoji: 🚀
-colorFrom: indigo
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Salesforce/EDICT/my_diffusers/models/resnet.py b/spaces/Salesforce/EDICT/my_diffusers/models/resnet.py
deleted file mode 100644
index fd7428eb58f1e22180a1acef7453ded281db5eb6..0000000000000000000000000000000000000000
--- a/spaces/Salesforce/EDICT/my_diffusers/models/resnet.py
+++ /dev/null
@@ -1,483 +0,0 @@
-from functools import partial
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class Upsample2D(nn.Module):
- """
- An upsampling layer with an optional convolution.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two
- dimensions.
- """
-
- def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_conv_transpose = use_conv_transpose
- self.name = name
-
- conv = None
- if use_conv_transpose:
- conv = nn.ConvTranspose2d(channels, self.out_channels, 4, 2, 1)
- elif use_conv:
- conv = nn.Conv2d(self.channels, self.out_channels, 3, padding=1)
-
- # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
- if name == "conv":
- self.conv = conv
- else:
- self.Conv2d_0 = conv
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.use_conv_transpose:
- return self.conv(x)
-
- x = F.interpolate(x, scale_factor=2.0, mode="nearest")
-
- # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
- if self.use_conv:
- if self.name == "conv":
- x = self.conv(x)
- else:
- x = self.Conv2d_0(x)
-
- return x
-
-
-class Downsample2D(nn.Module):
- """
- A downsampling layer with an optional convolution.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two
- dimensions.
- """
-
- def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.padding = padding
- stride = 2
- self.name = name
-
- if use_conv:
- conv = nn.Conv2d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
- else:
- assert self.channels == self.out_channels
- conv = nn.AvgPool2d(kernel_size=stride, stride=stride)
-
- # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
- if name == "conv":
- self.Conv2d_0 = conv
- self.conv = conv
- elif name == "Conv2d_0":
- self.conv = conv
- else:
- self.conv = conv
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.use_conv and self.padding == 0:
- pad = (0, 1, 0, 1)
- x = F.pad(x, pad, mode="constant", value=0)
-
- assert x.shape[1] == self.channels
- x = self.conv(x)
-
- return x
-
-
-class FirUpsample2D(nn.Module):
- def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)):
- super().__init__()
- out_channels = out_channels if out_channels else channels
- if use_conv:
- self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
- self.use_conv = use_conv
- self.fir_kernel = fir_kernel
- self.out_channels = out_channels
-
- def _upsample_2d(self, x, weight=None, kernel=None, factor=2, gain=1):
- """Fused `upsample_2d()` followed by `Conv2d()`.
-
- Padding is performed only once at the beginning, not between the operations. The fused op is considerably
- more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients
- of arbitrary order.
-
- Args:
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- weight: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution
- can be performed by `inChannels = x.shape[0] // numGroups`.
- kernel: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`,
- which corresponds to nearest-neighbor upsampling.
- factor: Integer upsampling factor (default: 2).
- gain: Scaling factor for signal magnitude (default: 1.0).
-
- Returns:
- Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same
- datatype as `x`.
- """
-
- assert isinstance(factor, int) and factor >= 1
-
- # Setup filter kernel.
- if kernel is None:
- kernel = [1] * factor
-
- # setup kernel
- kernel = np.asarray(kernel, dtype=np.float64)
- if kernel.ndim == 1:
- kernel = np.outer(kernel, kernel)
- kernel /= np.sum(kernel)
-
- kernel = kernel * (gain * (factor**2))
-
- if self.use_conv:
- convH = weight.shape[2]
- convW = weight.shape[3]
- inC = weight.shape[1]
-
- p = (kernel.shape[0] - factor) - (convW - 1)
-
- stride = (factor, factor)
- # Determine data dimensions.
- stride = [1, 1, factor, factor]
- output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW)
- output_padding = (
- output_shape[0] - (x.shape[2] - 1) * stride[0] - convH,
- output_shape[1] - (x.shape[3] - 1) * stride[1] - convW,
- )
- assert output_padding[0] >= 0 and output_padding[1] >= 0
- inC = weight.shape[1]
- num_groups = x.shape[1] // inC
-
- # Transpose weights.
- weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW))
- weight = weight[..., ::-1, ::-1].permute(0, 2, 1, 3, 4)
- weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW))
-
- x = F.conv_transpose2d(x, weight, stride=stride, output_padding=output_padding, padding=0)
-
- x = upfirdn2d_native(x, torch.tensor(kernel, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1))
- else:
- p = kernel.shape[0] - factor
- x = upfirdn2d_native(
- x, torch.tensor(kernel, device=x.device), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)
- )
-
- return x
-
- def forward(self, x):
- if self.use_conv:
- height = self._upsample_2d(x, self.Conv2d_0.weight, kernel=self.fir_kernel)
- height = height + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
- else:
- height = self._upsample_2d(x, kernel=self.fir_kernel, factor=2)
-
- return height
-
-
-class FirDownsample2D(nn.Module):
- def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)):
- super().__init__()
- out_channels = out_channels if out_channels else channels
- if use_conv:
- self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
- self.fir_kernel = fir_kernel
- self.use_conv = use_conv
- self.out_channels = out_channels
-
- def _downsample_2d(self, x, weight=None, kernel=None, factor=2, gain=1):
- """Fused `Conv2d()` followed by `downsample_2d()`.
-
- Padding is performed only once at the beginning, not between the operations. The fused op is considerably
- more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients
- of arbitrary order.
-
- Args:
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- weight: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution
- can be performed by `inChannels = x.shape[0] // numGroups`.
- kernel: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`,
- which corresponds to average pooling.
- factor: Integer downsampling factor (default: 2).
- gain: Scaling factor for signal magnitude (default: 1.0).
-
- Returns:
- Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same
- datatype as `x`.
- """
-
- assert isinstance(factor, int) and factor >= 1
- if kernel is None:
- kernel = [1] * factor
-
- # setup kernel
- kernel = np.asarray(kernel, dtype=np.float64)
- if kernel.ndim == 1:
- kernel = np.outer(kernel, kernel)
- kernel /= np.sum(kernel)
-
- kernel = kernel * gain
-
- if self.use_conv:
- _, _, convH, convW = weight.shape
- p = (kernel.shape[0] - factor) + (convW - 1)
- s = [factor, factor]
- x = upfirdn2d_native(x, torch.tensor(kernel, device=x.device), pad=((p + 1) // 2, p // 2))
- x = F.conv2d(x, weight, stride=s, padding=0)
- else:
- p = kernel.shape[0] - factor
- x = upfirdn2d_native(x, torch.tensor(kernel, device=x.device), down=factor, pad=((p + 1) // 2, p // 2))
-
- return x
-
- def forward(self, x):
- if self.use_conv:
- x = self._downsample_2d(x, weight=self.Conv2d_0.weight, kernel=self.fir_kernel)
- x = x + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
- else:
- x = self._downsample_2d(x, kernel=self.fir_kernel, factor=2)
-
- return x
-
-
-class ResnetBlock2D(nn.Module):
- def __init__(
- self,
- *,
- in_channels,
- out_channels=None,
- conv_shortcut=False,
- dropout=0.0,
- temb_channels=512,
- groups=32,
- groups_out=None,
- pre_norm=True,
- eps=1e-6,
- non_linearity="swish",
- time_embedding_norm="default",
- kernel=None,
- output_scale_factor=1.0,
- use_nin_shortcut=None,
- up=False,
- down=False,
- ):
- super().__init__()
- self.pre_norm = pre_norm
- self.pre_norm = True
- self.in_channels = in_channels
- out_channels = in_channels if out_channels is None else out_channels
- self.out_channels = out_channels
- self.use_conv_shortcut = conv_shortcut
- self.time_embedding_norm = time_embedding_norm
- self.up = up
- self.down = down
- self.output_scale_factor = output_scale_factor
-
- if groups_out is None:
- groups_out = groups
-
- self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
-
- self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
-
- if temb_channels is not None:
- self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels)
- else:
- self.time_emb_proj = None
-
- self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
- self.dropout = torch.nn.Dropout(dropout)
- self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
-
- if non_linearity == "swish":
- self.nonlinearity = lambda x: F.silu(x)
- elif non_linearity == "mish":
- self.nonlinearity = Mish()
- elif non_linearity == "silu":
- self.nonlinearity = nn.SiLU()
-
- self.upsample = self.downsample = None
- if self.up:
- if kernel == "fir":
- fir_kernel = (1, 3, 3, 1)
- self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel)
- elif kernel == "sde_vp":
- self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest")
- else:
- self.upsample = Upsample2D(in_channels, use_conv=False)
- elif self.down:
- if kernel == "fir":
- fir_kernel = (1, 3, 3, 1)
- self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel)
- elif kernel == "sde_vp":
- self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2)
- else:
- self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op")
-
- self.use_nin_shortcut = self.in_channels != self.out_channels if use_nin_shortcut is None else use_nin_shortcut
-
- self.conv_shortcut = None
- if self.use_nin_shortcut:
- self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
-
- def forward(self, x, temb):
- hidden_states = x
-
- # the normalization is run in double precision here (note the .double() cast),
- # then cast back to the working dtype
- hidden_states = self.norm1(hidden_states.double()).type(hidden_states.dtype)
- hidden_states = self.nonlinearity(hidden_states)
-
- if self.upsample is not None:
- x = self.upsample(x)
- hidden_states = self.upsample(hidden_states)
- elif self.downsample is not None:
- x = self.downsample(x)
- hidden_states = self.downsample(hidden_states)
-
- hidden_states = self.conv1(hidden_states)
-
- if temb is not None:
- temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None]
- hidden_states = hidden_states + temb
-
- # the normalization is run in double precision here (note the .double() cast),
- # then cast back to the working dtype
- hidden_states = self.norm2(hidden_states.double()).type(hidden_states.dtype)
- hidden_states = self.nonlinearity(hidden_states)
-
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.conv2(hidden_states)
-
- if self.conv_shortcut is not None:
- x = self.conv_shortcut(x)
-
- out = (x + hidden_states) / self.output_scale_factor
-
- return out
-
-
-class Mish(torch.nn.Module):
- def forward(self, x):
- return x * torch.tanh(torch.nn.functional.softplus(x))
-
-
-def upsample_2d(x, kernel=None, factor=2, gain=1):
- r"""Upsample2D a batch of 2D images with the given filter.
-
- Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the
- given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the
- specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so
- that its shape is a multiple of the upsampling factor.
-
- Args:
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- kernel: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`,
- which corresponds to nearest-neighbor upsampling.
- factor: Integer upsampling factor (default: 2).
- gain: Scaling factor for signal magnitude (default: 1.0).
-
- Returns:
- Tensor of the shape `[N, C, H * factor, W * factor]`
- """
- assert isinstance(factor, int) and factor >= 1
- if kernel is None:
- kernel = [1] * factor
-
- kernel = np.asarray(kernel, dtype=np.float64)
- if kernel.ndim == 1:
- kernel = np.outer(kernel, kernel)
- kernel /= np.sum(kernel)
-
- kernel = kernel * (gain * (factor**2))
- p = kernel.shape[0] - factor
- return upfirdn2d_native(
- x, torch.tensor(kernel, device=x.device), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)
- )
-
-
-def downsample_2d(x, kernel=None, factor=2, gain=1):
- r"""Downsample2D a batch of 2D images with the given filter.
-
- Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with
- the given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by
- the specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros
- so that its shape is a multiple of the downsampling factor.
-
- Args:
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- kernel: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`,
- which corresponds to average pooling.
- factor: Integer downsampling factor (default: 2).
- gain: Scaling factor for signal magnitude (default: 1.0).
-
- Returns:
- Tensor of the shape `[N, C, H // factor, W // factor]`
- """
-
- assert isinstance(factor, int) and factor >= 1
- if kernel is None:
- kernel = [1] * factor
-
- kernel = np.asarray(kernel, dtype=np.float64)
- if kernel.ndim == 1:
- kernel = np.outer(kernel, kernel)
- kernel /= np.sum(kernel)
-
- kernel = kernel * gain
- p = kernel.shape[0] - factor
- return upfirdn2d_native(x, torch.tensor(kernel, device=x.device), down=factor, pad=((p + 1) // 2, p // 2))
-
-
-def upfirdn2d_native(input, kernel, up=1, down=1, pad=(0, 0)):
- up_x = up_y = up
- down_x = down_y = down
- pad_x0 = pad_y0 = pad[0]
- pad_x1 = pad_y1 = pad[1]
-
- _, channel, in_h, in_w = input.shape
- input = input.reshape(-1, in_h, in_w, 1)
-
- _, in_h, in_w, minor = input.shape
- kernel_h, kernel_w = kernel.shape
-
- out = input.view(-1, in_h, 1, in_w, 1, minor)
-
- # Temporary workaround for mps specific issue: https://github.com/pytorch/pytorch/issues/84535
- if input.device.type == "mps":
- out = out.to("cpu")
- out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
- out = out.view(-1, in_h * up_y, in_w * up_x, minor)
-
- out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])
- out = out.to(input.device) # Move back to mps if necessary
- out = out[
- :,
- max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
- max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
- :,
- ]
-
- out = out.permute(0, 3, 1, 2)
- out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
- w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
- out = F.conv2d(out, w)
- out = out.reshape(
- -1,
- minor,
- in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
- in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
- )
- out = out.permute(0, 2, 3, 1)
- out = out[:, ::down_y, ::down_x, :]
-
- out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
- out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-
- return out.view(-1, channel, out_h, out_w)
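
The FIR helpers above all funnel into `upfirdn2d_native`, which pads, upsamples, filters, and downsamples in one pass. As a quick shape sanity check matching the docstrings, here is a sketch that assumes `upsample_2d` and `downsample_2d` from the deleted module have been pasted into scope; note the module builds its FIR kernel as float64, so a float64 input is used to avoid a dtype mismatch in `F.conv2d`.

```py
import torch

# assumes upsample_2d / downsample_2d (defined above) are available in scope
x = torch.randn(1, 4, 16, 16, dtype=torch.float64)  # float64 to match the float64 FIR kernel

up = upsample_2d(x, factor=2)      # default kernel [1, 1] -> nearest-neighbor-like upsampling
down = downsample_2d(x, factor=2)  # default kernel [1, 1] -> 2x2 average-pooling-like filter

print(up.shape)    # torch.Size([1, 4, 32, 32])
print(down.shape)  # torch.Size([1, 4, 8, 8])
```
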
diff --git a/spaces/Sandiago21/speech-to-speech-translation-italian/app.py b/spaces/Sandiago21/speech-to-speech-translation-italian/app.py
deleted file mode 100644
index af6054153dda45dd9702a439f483fa4dade3b461..0000000000000000000000000000000000000000
--- a/spaces/Sandiago21/speech-to-speech-translation-italian/app.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import gradio as gr
-import numpy as np
-import torch
-from datasets import load_dataset
-from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
-
-
-device = "cuda:0" if torch.cuda.is_available() else "cpu"
-
-# load speech translation checkpoint
-asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2", device=device)
-
-# load text-to-speech checkpoint and speaker embeddings
-model_id = "Sandiago21/speecht5_finetuned_voxpopuli_it" # update with your model id
-# pipe = pipeline("automatic-speech-recognition", model=model_id)
-model = SpeechT5ForTextToSpeech.from_pretrained(model_id)
-vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
-embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
-speaker_embeddings = torch.tensor(embeddings_dataset[7440]["xvector"]).unsqueeze(0)
-
-processor = SpeechT5Processor.from_pretrained(model_id)
-
-replacements = [
- ("á", "a"),
- ("ç", "c"),
- ("è", "e"),
- ("ì", "i"),
- ("í", "i"),
- ("ò", "o"),
- ("ó", "o"),
- ("ù", "u"),
- ("ú", "u"),
- ("š", "s"),
- ("ï", "i"),
-]
-
-def cleanup_text(text):
- for src, dst in replacements:
- text = text.replace(src, dst)
- return text
-
-def synthesize_speech(text):
- text = cleanup_text(text)
- inputs = processor(text=text, return_tensors="pt")
- speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
-
- return gr.Audio.update(value=(16000, speech.cpu().numpy()))
-
-def translate(audio):
- outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "italian"})
- return outputs["text"]
-
-
-def synthesise(text):
- text = cleanup_text(text)
- inputs = processor(text=text, return_tensors="pt")
- speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
- return speech.cpu()
-
-
-def speech_to_speech_translation(audio):
- translated_text = translate(audio)
- synthesised_speech = synthesise(translated_text)
- synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
- return 16000, synthesised_speech
-
-
-title = "Cascaded STST"
-description = """
-Demo for cascaded speech-to-speech translation (STST), mapping source speech in any language to target speech in Italian. The demo uses OpenAI's [Whisper Large v2](https://huggingface.co/openai/whisper-large-v2) model for speech translation and the [Sandiago21/speecht5_finetuned_voxpopuli_it](https://huggingface.co/Sandiago21/speecht5_finetuned_voxpopuli_it) checkpoint for text-to-speech, which is based on Microsoft's
-[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model, fine-tuned on an Italian audio dataset (VoxPopuli).
-
-"""
-
-demo = gr.Blocks()
-
-mic_translate = gr.Interface(
- fn=speech_to_speech_translation,
- inputs=gr.Audio(source="microphone", type="filepath"),
- outputs=gr.Audio(label="Generated Speech", type="numpy"),
- title=title,
- description=description,
-)
-
-file_translate = gr.Interface(
- fn=speech_to_speech_translation,
- inputs=gr.Audio(source="upload", type="filepath"),
- outputs=gr.Audio(label="Generated Speech", type="numpy"),
- examples=[["./example.wav"]],
- title=title,
- description=description,
-)
-
-with demo:
- gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])
-
-demo.launch()
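
Outside Gradio, the cascade can be exercised directly once the checkpoints have downloaded; a minimal sketch, where `sample_it.wav` is a hypothetical local recording and `soundfile` is an extra dependency not imported by the Space.

```py
import soundfile as sf  # assumption: soundfile is installed

sr, waveform = speech_to_speech_translation("sample_it.wav")  # hypothetical input file
print(sr, waveform.dtype, waveform.shape)                     # 16000 int16 (num_samples,)
sf.write("translated_it.wav", waveform, sr)
```
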
diff --git a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/utils/dataset/mpii.py b/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/utils/dataset/mpii.py
deleted file mode 100644
index eae0dd884ae1c9f02fd46b2924cb0132e8275c74..0000000000000000000000000000000000000000
--- a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/utils/dataset/mpii.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import os
-import h5py
-from functools import reduce
-
-import torch.utils.data as data
-from ..pose import generateSampleBox
-from opt import opt
-
-
-class Mpii(data.Dataset):
- def __init__(self, train=True, sigma=1,
- scale_factor=0.25, rot_factor=30, label_type='Gaussian'):
- self.img_folder = '../data/mpii/images' # root image folders
- self.is_train = train # training set or test set
- self.inputResH = 320
- self.inputResW = 256
- self.outputResH = 80
- self.outputResW = 64
- self.sigma = sigma
- self.scale_factor = (0.2, 0.3)
- self.rot_factor = rot_factor
- self.label_type = label_type
-
- self.nJoints_mpii = 16
- self.nJoints = 16
-
- self.accIdxs = (1, 2, 3, 4, 5, 6,
- 11, 12, 15, 16)
- self.flipRef = ((1, 6), (2, 5), (3, 4),
- (11, 16), (12, 15), (13, 14))
-
- # create train/val split
- with h5py.File('../data/mpii/annot_mpii.h5', 'r') as annot:
- # train
- self.imgname_mpii_train = annot['imgname'][:-1358]
- self.bndbox_mpii_train = annot['bndbox'][:-1358]
- self.part_mpii_train = annot['part'][:-1358]
- # val
- self.imgname_mpii_val = annot['imgname'][-1358:]
- self.bndbox_mpii_val = annot['bndbox'][-1358:]
- self.part_mpii_val = annot['part'][-1358:]
-
- self.size_train = self.imgname_mpii_train.shape[0]
- self.size_val = self.imgname_mpii_val.shape[0]
- self.train, self.valid = [], []
-
- def __getitem__(self, index):
- sf = self.scale_factor
-
- if self.is_train:
- part = self.part_mpii_train[index]
- bndbox = self.bndbox_mpii_train[index]
- imgname = self.imgname_mpii_train[index]
- else:
- part = self.part_mpii_val[index]
- bndbox = self.bndbox_mpii_val[index]
- imgname = self.imgname_mpii_val[index]
-
- imgname = reduce(lambda x, y: x + y, map(lambda x: chr(int(x)), imgname))[:13]
- img_path = os.path.join(self.img_folder, imgname)
-
- metaData = generateSampleBox(img_path, bndbox, part, self.nJoints,
- 'mpii', sf, self, train=self.is_train)
-
- inp, out_bigcircle, out_smallcircle, out, setMask = metaData
-
- label = []
- for i in range(opt.nStack):
- if i < 2:
- #label.append(out_bigcircle.clone())
- label.append(out.clone())
- elif i < 4:
- #label.append(out_smallcircle.clone())
- label.append(out.clone())
- else:
- label.append(out.clone())
-
- return inp, label, setMask
-
- def __len__(self):
- if self.is_train:
- return self.size_train
- else:
- return self.size_val
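
The least obvious step in `Mpii.__getitem__` is the filename decoding: the HDF5 annotation stores each image name as a fixed-length array of character codes, and the `reduce`/`map`/`chr` chain turns it back into a string before truncating to 13 characters (MPII names are typically a 9-digit id plus `.jpg`). A stand-alone illustration with a made-up name:

```py
import numpy as np

# a name stored as character codes, zero-padded to a fixed length (values are made up)
imgname_codes = np.array([ord(c) for c in "099363019.jpg"] + [0] * 3, dtype=np.float64)

# equivalent to reduce(lambda x, y: x + y, map(lambda x: chr(int(x)), imgname_codes))[:13]
decoded = "".join(chr(int(code)) for code in imgname_codes)[:13]
print(decoded)  # 099363019.jpg
```
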
diff --git a/spaces/Silentlin/DiffSinger/modules/__init__.py b/spaces/Silentlin/DiffSinger/modules/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Skyler123/TangGPT/modules/shared.py b/spaces/Skyler123/TangGPT/modules/shared.py
deleted file mode 100644
index 70f13cbcf84984487b5e4e47e3bcc1dbb082511a..0000000000000000000000000000000000000000
--- a/spaces/Skyler123/TangGPT/modules/shared.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from modules.presets import COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST
-import os
-import queue
-
-class State:
- interrupted = False
- multi_api_key = False
- completion_url = COMPLETION_URL
- balance_api_url = BALANCE_API_URL
- usage_api_url = USAGE_API_URL
-
- def interrupt(self):
- self.interrupted = True
-
- def recover(self):
- self.interrupted = False
-
- def set_api_host(self, api_host):
- self.completion_url = f"https://{api_host}/v1/chat/completions"
- self.balance_api_url = f"https://{api_host}/dashboard/billing/credit_grants"
- self.usage_api_url = f"https://{api_host}/dashboard/billing/usage"
- os.environ["OPENAI_API_BASE"] = f"https://{api_host}/v1"
-
- def reset_api_host(self):
- self.completion_url = COMPLETION_URL
- self.balance_api_url = BALANCE_API_URL
- self.usage_api_url = USAGE_API_URL
- os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}/v1"
- return API_HOST
-
- def reset_all(self):
- self.interrupted = False
- self.completion_url = COMPLETION_URL
-
- def set_api_key_queue(self, api_key_list):
- self.multi_api_key = True
- self.api_key_queue = queue.Queue()
- for api_key in api_key_list:
- self.api_key_queue.put(api_key)
-
- def switching_api_key(self, func):
- if not hasattr(self, "api_key_queue"):
- return func
-
- def wrapped(*args, **kwargs):
- api_key = self.api_key_queue.get()
- args = list(args)[1:]
- ret = func(api_key, *args, **kwargs)
- self.api_key_queue.put(api_key)
- return ret
-
- return wrapped
-
-
-state = State()
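
The `switching_api_key` wrapper above pops a key from the queue, passes it as the first argument in place of whatever placeholder the caller supplied, and returns the key to the queue afterwards. A minimal usage sketch; the keys and the `ask` function are made up for illustration.

```py
state.set_api_key_queue(["sk-aaa", "sk-bbb"])  # hypothetical keys

@state.switching_api_key
def ask(api_key, prompt):
    return f"[{api_key}] answering: {prompt}"

# the caller's first positional argument is discarded and replaced by a queued key
print(ask(None, "Hello"))  # [sk-aaa] answering: Hello
```
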
diff --git a/spaces/Snowling/White-box-Cartoonization/README.md b/spaces/Snowling/White-box-Cartoonization/README.md
deleted file mode 100644
index 9860239cf42c94e385faaaa75a85311e010d64f7..0000000000000000000000000000000000000000
--- a/spaces/Snowling/White-box-Cartoonization/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-python_version: 3.7
-title: White Box Cartoonization
-emoji: 📚
-colorFrom: purple
-colorTo: green
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: hylee/White-box-Cartoonization
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/editorhooks.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/editorhooks.py
deleted file mode 100644
index d8bd6ac81bcfa42204ecd9415b8f1ffd1dd0be57..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/editorhooks.py
+++ /dev/null
@@ -1,127 +0,0 @@
-""" 'editor' hooks for common editors that work well with ipython
-
-They should honor the line number argument, at least.
-
-Contributions are *very* welcome.
-"""
-
-import os
-import shlex
-import subprocess
-import sys
-
-from IPython import get_ipython
-from IPython.core.error import TryNext
-from IPython.utils import py3compat
-
-
-def install_editor(template, wait=False):
- """Installs the editor that is called by IPython for the %edit magic.
-
- This overrides the default editor, which is generally set by your EDITOR
- environment variable or is notepad (windows) or vi (linux). By supplying a
- template string `run_template`, you can control how the editor is invoked
- by IPython -- (e.g. the format in which it accepts command line options)
-
- Parameters
- ----------
- template : basestring
- run_template acts as a template for how your editor is invoked by
- the shell. It should contain '{filename}', which will be replaced on
- invocation with the file name, and '{line}', which will be replaced
- with the line number (or 0) at which to open the file.
- wait : bool
- If `wait` is true, wait until the user presses enter before returning,
- to facilitate non-blocking editors that exit immediately after
- the call.
- """
-
- # not all editors support $line, so we'll leave out this check
- # for substitution in ['$file', '$line']:
- # if not substitution in run_template:
- # raise ValueError(('run_template should contain %s'
- # ' for string substitution. You supplied "%s"' % (substitution,
- # run_template)))
-
- def call_editor(self, filename, line=0):
- if line is None:
- line = 0
- cmd = template.format(filename=shlex.quote(filename), line=line)
- print(">", cmd)
- # shlex.quote doesn't work right on Windows, but it does after splitting
- if sys.platform.startswith('win'):
- cmd = shlex.split(cmd)
- proc = subprocess.Popen(cmd, shell=True)
- if proc.wait() != 0:
- raise TryNext()
- if wait:
- py3compat.input("Press Enter when done editing:")
-
- get_ipython().set_hook('editor', call_editor)
- get_ipython().editor = template
-
-
-# in these, exe is always the path/name of the executable. Useful
-# if you don't have the editor directory in your path
-def komodo(exe=u'komodo'):
- """ Activestate Komodo [Edit] """
- install_editor(exe + u' -l {line} {filename}', wait=True)
-
-
-def scite(exe=u"scite"):
- """ SciTE or Sc1 """
- install_editor(exe + u' {filename} -goto:{line}')
-
-
-def notepadplusplus(exe=u'notepad++'):
- """ Notepad++ http://notepad-plus.sourceforge.net """
- install_editor(exe + u' -n{line} {filename}')
-
-
-def jed(exe=u'jed'):
- """ JED, the lightweight emacsish editor """
- install_editor(exe + u' +{line} {filename}')
-
-
-def idle(exe=u'idle'):
- """ Idle, the editor bundled with python
-
- Parameters
- ----------
- exe : str, None
- If none, should be pretty smart about finding the executable.
- """
- if exe is None:
- import idlelib
- p = os.path.dirname(idlelib.__file__)
- # i'm not sure if this actually works. Is this idle.py script
- # guaranteed to be executable?
- exe = os.path.join(p, 'idle.py')
- install_editor(exe + u' {filename}')
-
-
-def mate(exe=u'mate'):
- """ TextMate, the missing editor"""
- # wait=True is not required since we're using the -w flag to mate
- install_editor(exe + u' -w -l {line} {filename}')
-
-
-# ##########################################
-# these are untested, report any problems
-# ##########################################
-
-
-def emacs(exe=u'emacs'):
- install_editor(exe + u' +{line} {filename}')
-
-
-def gnuclient(exe=u'gnuclient'):
- install_editor(exe + u' -nw +{line} {filename}')
-
-
-def crimson_editor(exe=u'cedt.exe'):
- install_editor(exe + u' /L:{line} {filename}')
-
-
-def kate(exe=u'kate'):
- install_editor(exe + u' -u -l {line} {filename}')
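
For editors not covered by the helpers above, `install_editor` can be called directly with a custom template. A sketch, to be run inside an IPython session; the VS Code `code -g {filename}:{line}` form is used here as one example of an editor that accepts a line number.

```py
from IPython.lib import editorhooks

# register VS Code for the %edit magic; wait=True pauses until Enter is pressed
editorhooks.install_editor("code -g {filename}:{line}", wait=True)
```
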
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageFile.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageFile.py
deleted file mode 100644
index 8e4f7dfb2c8854ee3a1f65efd6535732df1764aa..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageFile.py
+++ /dev/null
@@ -1,773 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# base class for image file handlers
-#
-# history:
-# 1995-09-09 fl Created
-# 1996-03-11 fl Fixed load mechanism.
-# 1996-04-15 fl Added pcx/xbm decoders.
-# 1996-04-30 fl Added encoders.
-# 1996-12-14 fl Added load helpers
-# 1997-01-11 fl Use encode_to_file where possible
-# 1997-08-27 fl Flush output in _save
-# 1998-03-05 fl Use memory mapping for some modes
-# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B"
-# 1999-05-31 fl Added image parser
-# 2000-10-12 fl Set readonly flag on memory-mapped images
-# 2002-03-20 fl Use better messages for common decoder errors
-# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available
-# 2003-10-30 fl Added StubImageFile class
-# 2004-02-25 fl Made incremental parser more robust
-#
-# Copyright (c) 1997-2004 by Secret Labs AB
-# Copyright (c) 1995-2004 by Fredrik Lundh
-#
-# See the README file for information on usage and redistribution.
-#
-
-import io
-import itertools
-import struct
-import sys
-
-from . import Image
-from ._util import is_path
-
-MAXBLOCK = 65536
-
-SAFEBLOCK = 1024 * 1024
-
-LOAD_TRUNCATED_IMAGES = False
-"""Whether or not to load truncated image files. User code may change this."""
-
-ERRORS = {
- -1: "image buffer overrun error",
- -2: "decoding error",
- -3: "unknown error",
- -8: "bad configuration",
- -9: "out of memory error",
-}
-"""
-Dict of known error codes returned from :meth:`.PyDecoder.decode`,
-:meth:`.PyEncoder.encode` :meth:`.PyEncoder.encode_to_pyfd` and
-:meth:`.PyEncoder.encode_to_file`.
-"""
-
-
-#
-# --------------------------------------------------------------------
-# Helpers
-
-
-def raise_oserror(error):
- try:
- msg = Image.core.getcodecstatus(error)
- except AttributeError:
- msg = ERRORS.get(error)
- if not msg:
- msg = f"decoder error {error}"
- msg += " when reading image file"
- raise OSError(msg)
-
-
-def _tilesort(t):
- # sort on offset
- return t[2]
-
-
-#
-# --------------------------------------------------------------------
-# ImageFile base class
-
-
-class ImageFile(Image.Image):
- """Base class for image file format handlers."""
-
- def __init__(self, fp=None, filename=None):
- super().__init__()
-
- self._min_frame = 0
-
- self.custom_mimetype = None
-
- self.tile = None
- """ A list of tile descriptors, or ``None`` """
-
- self.readonly = 1 # until we know better
-
- self.decoderconfig = ()
- self.decodermaxblock = MAXBLOCK
-
- if is_path(fp):
- # filename
- self.fp = open(fp, "rb")
- self.filename = fp
- self._exclusive_fp = True
- else:
- # stream
- self.fp = fp
- self.filename = filename
- # can be overridden
- self._exclusive_fp = None
-
- try:
- try:
- self._open()
- except (
- IndexError, # end of data
- TypeError, # end of data (ord)
- KeyError, # unsupported mode
- EOFError, # got header but not the first frame
- struct.error,
- ) as v:
- raise SyntaxError(v) from v
-
- if not self.mode or self.size[0] <= 0 or self.size[1] <= 0:
- msg = "not identified by this driver"
- raise SyntaxError(msg)
- except BaseException:
- # close the file only if we have opened it this constructor
- if self._exclusive_fp:
- self.fp.close()
- raise
-
- def get_format_mimetype(self):
- if self.custom_mimetype:
- return self.custom_mimetype
- if self.format is not None:
- return Image.MIME.get(self.format.upper())
-
- def __setstate__(self, state):
- self.tile = []
- super().__setstate__(state)
-
- def verify(self):
- """Check file integrity"""
-
- # raise exception if something's wrong. must be called
- # directly after open, and closes file when finished.
- if self._exclusive_fp:
- self.fp.close()
- self.fp = None
-
- def load(self):
- """Load image data based on tile list"""
-
- if self.tile is None:
- msg = "cannot load this image"
- raise OSError(msg)
-
- pixel = Image.Image.load(self)
- if not self.tile:
- return pixel
-
- self.map = None
- use_mmap = self.filename and len(self.tile) == 1
- # As of pypy 2.1.0, memory mapping was failing here.
- use_mmap = use_mmap and not hasattr(sys, "pypy_version_info")
-
- readonly = 0
-
- # look for read/seek overrides
- try:
- read = self.load_read
- # don't use mmap if there are custom read/seek functions
- use_mmap = False
- except AttributeError:
- read = self.fp.read
-
- try:
- seek = self.load_seek
- use_mmap = False
- except AttributeError:
- seek = self.fp.seek
-
- if use_mmap:
- # try memory mapping
- decoder_name, extents, offset, args = self.tile[0]
- if (
- decoder_name == "raw"
- and len(args) >= 3
- and args[0] == self.mode
- and args[0] in Image._MAPMODES
- ):
- try:
- # use mmap, if possible
- import mmap
-
- with open(self.filename) as fp:
- self.map = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
- if offset + self.size[1] * args[1] > self.map.size():
- # buffer is not large enough
- raise OSError
- self.im = Image.core.map_buffer(
- self.map, self.size, decoder_name, offset, args
- )
- readonly = 1
- # After trashing self.im,
- # we might need to reload the palette data.
- if self.palette:
- self.palette.dirty = 1
- except (AttributeError, OSError, ImportError):
- self.map = None
-
- self.load_prepare()
- err_code = -3 # initialize to unknown error
- if not self.map:
- # sort tiles in file order
- self.tile.sort(key=_tilesort)
-
- try:
- # FIXME: This is a hack to handle TIFF's JpegTables tag.
- prefix = self.tile_prefix
- except AttributeError:
- prefix = b""
-
- # Remove consecutive duplicates that only differ by their offset
- self.tile = [
- list(tiles)[-1]
- for _, tiles in itertools.groupby(
- self.tile, lambda tile: (tile[0], tile[1], tile[3])
- )
- ]
- for decoder_name, extents, offset, args in self.tile:
- seek(offset)
- decoder = Image._getdecoder(
- self.mode, decoder_name, args, self.decoderconfig
- )
- try:
- decoder.setimage(self.im, extents)
- if decoder.pulls_fd:
- decoder.setfd(self.fp)
- err_code = decoder.decode(b"")[1]
- else:
- b = prefix
- while True:
- try:
- s = read(self.decodermaxblock)
- except (IndexError, struct.error) as e:
- # truncated png/gif
- if LOAD_TRUNCATED_IMAGES:
- break
- else:
- msg = "image file is truncated"
- raise OSError(msg) from e
-
- if not s: # truncated jpeg
- if LOAD_TRUNCATED_IMAGES:
- break
- else:
- msg = (
- "image file is truncated "
- f"({len(b)} bytes not processed)"
- )
- raise OSError(msg)
-
- b = b + s
- n, err_code = decoder.decode(b)
- if n < 0:
- break
- b = b[n:]
- finally:
- # Need to cleanup here to prevent leaks
- decoder.cleanup()
-
- self.tile = []
- self.readonly = readonly
-
- self.load_end()
-
- if self._exclusive_fp and self._close_exclusive_fp_after_loading:
- self.fp.close()
- self.fp = None
-
- if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
- # still raised if decoder fails to return anything
- raise_oserror(err_code)
-
- return Image.Image.load(self)
-
- def load_prepare(self):
- # create image memory if necessary
- if not self.im or self.im.mode != self.mode or self.im.size != self.size:
- self.im = Image.core.new(self.mode, self.size)
- # create palette (optional)
- if self.mode == "P":
- Image.Image.load(self)
-
- def load_end(self):
- # may be overridden
- pass
-
- # may be defined for contained formats
- # def load_seek(self, pos):
- # pass
-
- # may be defined for blocked formats (e.g. PNG)
- # def load_read(self, bytes):
- # pass
-
- def _seek_check(self, frame):
- if (
- frame < self._min_frame
- # Only check upper limit on frames if additional seek operations
- # are not required to do so
- or (
- not (hasattr(self, "_n_frames") and self._n_frames is None)
- and frame >= self.n_frames + self._min_frame
- )
- ):
- msg = "attempt to seek outside sequence"
- raise EOFError(msg)
-
- return self.tell() != frame
-
-
-class StubImageFile(ImageFile):
- """
- Base class for stub image loaders.
-
- A stub loader is an image loader that can identify files of a
- certain format, but relies on external code to load the file.
- """
-
- def _open(self):
- msg = "StubImageFile subclass must implement _open"
- raise NotImplementedError(msg)
-
- def load(self):
- loader = self._load()
- if loader is None:
- msg = f"cannot find loader for this {self.format} file"
- raise OSError(msg)
- image = loader.load(self)
- assert image is not None
- # become the other object (!)
- self.__class__ = image.__class__
- self.__dict__ = image.__dict__
- return image.load()
-
- def _load(self):
- """(Hook) Find actual image loader."""
- msg = "StubImageFile subclass must implement _load"
- raise NotImplementedError(msg)
-
-
-class Parser:
- """
- Incremental image parser. This class implements the standard
- feed/close consumer interface.
- """
-
- incremental = None
- image = None
- data = None
- decoder = None
- offset = 0
- finished = 0
-
- def reset(self):
- """
- (Consumer) Reset the parser. Note that you can only call this
- method immediately after you've created a parser; parser
- instances cannot be reused.
- """
- assert self.data is None, "cannot reuse parsers"
-
- def feed(self, data):
- """
- (Consumer) Feed data to the parser.
-
- :param data: A string buffer.
- :exception OSError: If the parser failed to parse the image file.
- """
- # collect data
-
- if self.finished:
- return
-
- if self.data is None:
- self.data = data
- else:
- self.data = self.data + data
-
- # parse what we have
- if self.decoder:
- if self.offset > 0:
- # skip header
- skip = min(len(self.data), self.offset)
- self.data = self.data[skip:]
- self.offset = self.offset - skip
- if self.offset > 0 or not self.data:
- return
-
- n, e = self.decoder.decode(self.data)
-
- if n < 0:
- # end of stream
- self.data = None
- self.finished = 1
- if e < 0:
- # decoding error
- self.image = None
- raise_oserror(e)
- else:
- # end of image
- return
- self.data = self.data[n:]
-
- elif self.image:
- # if we end up here with no decoder, this file cannot
- # be incrementally parsed. wait until we've gotten all
- # available data
- pass
-
- else:
- # attempt to open this file
- try:
- with io.BytesIO(self.data) as fp:
- im = Image.open(fp)
- except OSError:
- # traceback.print_exc()
- pass # not enough data
- else:
- flag = hasattr(im, "load_seek") or hasattr(im, "load_read")
- if flag or len(im.tile) != 1:
- # custom load code, or multiple tiles
-                    self.decoder = None
- else:
- # initialize decoder
- im.load_prepare()
- d, e, o, a = im.tile[0]
- im.tile = []
- self.decoder = Image._getdecoder(im.mode, d, a, im.decoderconfig)
- self.decoder.setimage(im.im, e)
-
- # calculate decoder offset
- self.offset = o
- if self.offset <= len(self.data):
- self.data = self.data[self.offset :]
- self.offset = 0
-
- self.image = im
-
- def __enter__(self):
- return self
-
- def __exit__(self, *args):
- self.close()
-
- def close(self):
- """
- (Consumer) Close the stream.
-
- :returns: An image object.
- :exception OSError: If the parser failed to parse the image file either
- because it cannot be identified or cannot be
- decoded.
- """
- # finish decoding
- if self.decoder:
- # get rid of what's left in the buffers
- self.feed(b"")
- self.data = self.decoder = None
- if not self.finished:
- msg = "image was incomplete"
- raise OSError(msg)
- if not self.image:
- msg = "cannot parse this image"
- raise OSError(msg)
- if self.data:
- # incremental parsing not possible; reopen the file
-            # now that we have all data
- with io.BytesIO(self.data) as fp:
- try:
- self.image = Image.open(fp)
- finally:
- self.image.load()
- return self.image
-
-
-# --------------------------------------------------------------------
-
-
-def _save(im, fp, tile, bufsize=0):
- """Helper to save image based on tile list
-
- :param im: Image object.
- :param fp: File object.
- :param tile: Tile list.
- :param bufsize: Optional buffer size
- """
-
- im.load()
- if not hasattr(im, "encoderconfig"):
- im.encoderconfig = ()
- tile.sort(key=_tilesort)
- # FIXME: make MAXBLOCK a configuration parameter
- # It would be great if we could have the encoder specify what it needs
- # But, it would need at least the image size in most cases. RawEncode is
- # a tricky case.
- bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c
- try:
- fh = fp.fileno()
- fp.flush()
- _encode_tile(im, fp, tile, bufsize, fh)
- except (AttributeError, io.UnsupportedOperation) as exc:
- _encode_tile(im, fp, tile, bufsize, None, exc)
- if hasattr(fp, "flush"):
- fp.flush()
-
-
-def _encode_tile(im, fp, tile, bufsize, fh, exc=None):
- for e, b, o, a in tile:
- if o > 0:
- fp.seek(o)
- encoder = Image._getencoder(im.mode, e, a, im.encoderconfig)
- try:
- encoder.setimage(im.im, b)
- if encoder.pushes_fd:
- encoder.setfd(fp)
- errcode = encoder.encode_to_pyfd()[1]
- else:
- if exc:
- # compress to Python file-compatible object
- while True:
- errcode, data = encoder.encode(bufsize)[1:]
- fp.write(data)
- if errcode:
- break
- else:
- # slight speedup: compress to real file object
- errcode = encoder.encode_to_file(fh, bufsize)
- if errcode < 0:
- msg = f"encoder error {errcode} when writing image file"
- raise OSError(msg) from exc
- finally:
- encoder.cleanup()
-
-
-def _safe_read(fp, size):
- """
- Reads large blocks in a safe way. Unlike fp.read(n), this function
- doesn't trust the user. If the requested size is larger than
- SAFEBLOCK, the file is read block by block.
-
- :param fp: File handle. Must implement a read method.
- :param size: Number of bytes to read.
-    :returns: A bytes object containing size bytes of data.
-
- Raises an OSError if the file is truncated and the read cannot be completed
-
- """
- if size <= 0:
- return b""
- if size <= SAFEBLOCK:
- data = fp.read(size)
- if len(data) < size:
- msg = "Truncated File Read"
- raise OSError(msg)
- return data
- data = []
- remaining_size = size
- while remaining_size > 0:
- block = fp.read(min(remaining_size, SAFEBLOCK))
- if not block:
- break
- data.append(block)
- remaining_size -= len(block)
- if sum(len(d) for d in data) < size:
- msg = "Truncated File Read"
- raise OSError(msg)
- return b"".join(data)
-
-
-class PyCodecState:
- def __init__(self):
- self.xsize = 0
- self.ysize = 0
- self.xoff = 0
- self.yoff = 0
-
- def extents(self):
- return self.xoff, self.yoff, self.xoff + self.xsize, self.yoff + self.ysize
-
-
-class PyCodec:
- def __init__(self, mode, *args):
- self.im = None
- self.state = PyCodecState()
- self.fd = None
- self.mode = mode
- self.init(args)
-
- def init(self, args):
- """
- Override to perform codec specific initialization
-
- :param args: Array of args items from the tile entry
- :returns: None
- """
- self.args = args
-
- def cleanup(self):
- """
- Override to perform codec specific cleanup
-
- :returns: None
- """
- pass
-
- def setfd(self, fd):
- """
- Called from ImageFile to set the Python file-like object
-
- :param fd: A Python file-like object
- :returns: None
- """
- self.fd = fd
-
- def setimage(self, im, extents=None):
- """
- Called from ImageFile to set the core output image for the codec
-
- :param im: A core image object
- :param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle
- for this tile
- :returns: None
- """
-
- # following c code
- self.im = im
-
- if extents:
- (x0, y0, x1, y1) = extents
- else:
- (x0, y0, x1, y1) = (0, 0, 0, 0)
-
- if x0 == 0 and x1 == 0:
- self.state.xsize, self.state.ysize = self.im.size
- else:
- self.state.xoff = x0
- self.state.yoff = y0
- self.state.xsize = x1 - x0
- self.state.ysize = y1 - y0
-
- if self.state.xsize <= 0 or self.state.ysize <= 0:
- msg = "Size cannot be negative"
- raise ValueError(msg)
-
- if (
- self.state.xsize + self.state.xoff > self.im.size[0]
- or self.state.ysize + self.state.yoff > self.im.size[1]
- ):
- msg = "Tile cannot extend outside image"
- raise ValueError(msg)
-
-
-class PyDecoder(PyCodec):
- """
- Python implementation of a format decoder. Override this class and
- add the decoding logic in the :meth:`decode` method.
-
- See :ref:`Writing Your Own File Codec in Python`
- """
-
- _pulls_fd = False
-
- @property
- def pulls_fd(self):
- return self._pulls_fd
-
- def decode(self, buffer):
- """
- Override to perform the decoding process.
-
- :param buffer: A bytes object with the data to be decoded.
- :returns: A tuple of ``(bytes consumed, errcode)``.
- If finished with decoding return -1 for the bytes consumed.
- Err codes are from :data:`.ImageFile.ERRORS`.
- """
- raise NotImplementedError()
-
- def set_as_raw(self, data, rawmode=None):
- """
- Convenience method to set the internal image from a stream of raw data
-
- :param data: Bytes to be set
- :param rawmode: The rawmode to be used for the decoder.
- If not specified, it will default to the mode of the image
- :returns: None
- """
-
- if not rawmode:
- rawmode = self.mode
- d = Image._getdecoder(self.mode, "raw", rawmode)
- d.setimage(self.im, self.state.extents())
- s = d.decode(data)
-
- if s[0] >= 0:
- msg = "not enough image data"
- raise ValueError(msg)
- if s[1] != 0:
- msg = "cannot decode image data"
- raise ValueError(msg)
-
-
-class PyEncoder(PyCodec):
- """
- Python implementation of a format encoder. Override this class and
-    add the encoding logic in the :meth:`encode` method.
-
- See :ref:`Writing Your Own File Codec in Python`
- """
-
- _pushes_fd = False
-
- @property
- def pushes_fd(self):
- return self._pushes_fd
-
- def encode(self, bufsize):
- """
- Override to perform the encoding process.
-
- :param bufsize: Buffer size.
- :returns: A tuple of ``(bytes encoded, errcode, bytes)``.
- If finished with encoding return 1 for the error code.
- Err codes are from :data:`.ImageFile.ERRORS`.
- """
- raise NotImplementedError()
-
- def encode_to_pyfd(self):
- """
- If ``pushes_fd`` is ``True``, then this method will be used,
- and ``encode()`` will only be called once.
-
- :returns: A tuple of ``(bytes consumed, errcode)``.
- Err codes are from :data:`.ImageFile.ERRORS`.
- """
- if not self.pushes_fd:
- return 0, -8 # bad configuration
- bytes_consumed, errcode, data = self.encode(0)
- if data:
- self.fd.write(data)
- return bytes_consumed, errcode
-
- def encode_to_file(self, fh, bufsize):
- """
- :param fh: File handle.
- :param bufsize: Buffer size.
-
- :returns: If finished successfully, return 0.
- Otherwise, return an error code. Err codes are from
- :data:`.ImageFile.ERRORS`.
- """
- errcode = 0
- while errcode == 0:
- status, errcode, buf = self.encode(bufsize)
- if status > 0:
-                fh.write(buf[:status])  # write the bytes produced by this encode() call
- return errcode
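
The `PyDecoder` hooks deleted above (`pulls_fd`, `setfd`, `decode`, `set_as_raw`) fit together roughly as in this minimal sketch; the class, its registration name, and the raw-passthrough behaviour are illustrative assumptions, not code from the removed file:

```py
from PIL import Image, ImageFile

class RawPassthroughDecoder(ImageFile.PyDecoder):
    # _pulls_fd=True: ImageFile.load() calls setfd(fp) and then decode(b"") once,
    # so the decoder reads the file object itself instead of receiving chunks.
    _pulls_fd = True

    def decode(self, buffer):
        # treat the rest of the file as raw pixel data in the image's own mode
        self.set_as_raw(self.fd.read())
        return -1, 0  # -1: no more data wanted, 0: no error

# hypothetical registration; a real plugin would reference this name from its
# tile entries, e.g. [("raw_passthrough", bbox, offset, args)]
Image.register_decoder("raw_passthrough", RawPassthroughDecoder)
```
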
diff --git a/spaces/Suniilkumaar/MusicGen-updated/audiocraft/models/builders.py b/spaces/Suniilkumaar/MusicGen-updated/audiocraft/models/builders.py
deleted file mode 100644
index 77ee5f96fea2e3c9e475fe961bc1a5ee473ed8eb..0000000000000000000000000000000000000000
--- a/spaces/Suniilkumaar/MusicGen-updated/audiocraft/models/builders.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-All the functions to build the relevant models and modules
-from the Hydra config.
-"""
-
-import typing as tp
-import warnings
-
-import audiocraft
-import omegaconf
-import torch
-
-from .encodec import CompressionModel, EncodecModel, FlattenedCompressionModel # noqa
-from .lm import LMModel
-from ..modules.codebooks_patterns import (
- CodebooksPatternProvider,
- DelayedPatternProvider,
- ParallelPatternProvider,
- UnrolledPatternProvider,
- VALLEPattern,
- MusicLMPattern,
-)
-from ..modules.conditioners import (
- BaseConditioner,
- ConditioningProvider,
- LUTConditioner,
- T5Conditioner,
- ConditionFuser,
- ChromaStemConditioner,
-)
-from .. import quantization as qt
-from ..utils.utils import dict_from_config
-
-
-def get_quantizer(quantizer: str, cfg: omegaconf.DictConfig, dimension: int) -> qt.BaseQuantizer:
- klass = {
- 'no_quant': qt.DummyQuantizer,
- 'rvq': qt.ResidualVectorQuantizer
- }[quantizer]
- kwargs = dict_from_config(getattr(cfg, quantizer))
- if quantizer != 'no_quant':
- kwargs['dimension'] = dimension
- return klass(**kwargs)
-
-
-def get_encodec_autoencoder(encoder_name: str, cfg: omegaconf.DictConfig):
- if encoder_name == 'seanet':
- kwargs = dict_from_config(getattr(cfg, 'seanet'))
- encoder_override_kwargs = kwargs.pop('encoder')
- decoder_override_kwargs = kwargs.pop('decoder')
- encoder_kwargs = {**kwargs, **encoder_override_kwargs}
- decoder_kwargs = {**kwargs, **decoder_override_kwargs}
- encoder = audiocraft.modules.SEANetEncoder(**encoder_kwargs)
- decoder = audiocraft.modules.SEANetDecoder(**decoder_kwargs)
- return encoder, decoder
- else:
- raise KeyError(f'Unexpected compression model {cfg.compression_model}')
-
-
-def get_compression_model(cfg: omegaconf.DictConfig) -> CompressionModel:
- """Instantiate a compression model.
- """
- if cfg.compression_model == 'encodec':
- kwargs = dict_from_config(getattr(cfg, 'encodec'))
- encoder_name = kwargs.pop('autoencoder')
- quantizer_name = kwargs.pop('quantizer')
- encoder, decoder = get_encodec_autoencoder(encoder_name, cfg)
- quantizer = get_quantizer(quantizer_name, cfg, encoder.dimension)
- frame_rate = kwargs['sample_rate'] // encoder.hop_length
- renormalize = kwargs.pop('renormalize', None)
- renorm = kwargs.pop('renorm')
- if renormalize is None:
- renormalize = renorm is not None
- warnings.warn("You are using a deprecated EnCodec model. Please migrate to new renormalization.")
- return EncodecModel(encoder, decoder, quantizer,
- frame_rate=frame_rate, renormalize=renormalize, **kwargs).to(cfg.device)
- else:
- raise KeyError(f'Unexpected compression model {cfg.compression_model}')
-
-
-def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:
- """Instantiate a transformer LM.
- """
- if cfg.lm_model == 'transformer_lm':
- kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))
- n_q = kwargs['n_q']
- q_modeling = kwargs.pop('q_modeling', None)
- codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')
- attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))
- cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))
- cfg_prob, cfg_coef = cls_free_guidance["training_dropout"], cls_free_guidance["inference_coef"]
- fuser = get_condition_fuser(cfg)
- condition_provider = get_conditioner_provider(kwargs["dim"], cfg).to(cfg.device)
-        if len(fuser.fuse2cond['cross']) > 0:  # enforce cross-att programmatically
- kwargs['cross_attention'] = True
- if codebooks_pattern_cfg.modeling is None:
- assert q_modeling is not None, \
- 'LM model should either have a codebook pattern defined or transformer_lm.q_modeling'
- codebooks_pattern_cfg = omegaconf.OmegaConf.create(
- {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}
- )
- pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)
- return LMModel(
- pattern_provider=pattern_provider,
- condition_provider=condition_provider,
- fuser=fuser,
- cfg_dropout=cfg_prob,
- cfg_coef=cfg_coef,
- attribute_dropout=attribute_dropout,
- dtype=getattr(torch, cfg.dtype),
- device=cfg.device,
- **kwargs
- ).to(cfg.device)
- else:
- raise KeyError(f'Unexpected LM model {cfg.lm_model}')
-
-
-def get_conditioner_provider(output_dim: int, cfg: omegaconf.DictConfig) -> ConditioningProvider:
- """Instantiate a conditioning model.
- """
- device = cfg.device
- duration = cfg.dataset.segment_duration
- cfg = getattr(cfg, "conditioners")
- cfg = omegaconf.OmegaConf.create({}) if cfg is None else cfg
- conditioners: tp.Dict[str, BaseConditioner] = {}
- with omegaconf.open_dict(cfg):
- condition_provider_args = cfg.pop('args', {})
- for cond, cond_cfg in cfg.items():
- model_type = cond_cfg["model"]
- model_args = cond_cfg[model_type]
- if model_type == "t5":
- conditioners[str(cond)] = T5Conditioner(output_dim=output_dim, device=device, **model_args)
- elif model_type == "lut":
- conditioners[str(cond)] = LUTConditioner(output_dim=output_dim, **model_args)
- elif model_type == "chroma_stem":
- model_args.pop('cache_path', None)
- conditioners[str(cond)] = ChromaStemConditioner(
- output_dim=output_dim,
- duration=duration,
- device=device,
- **model_args
- )
- else:
- raise ValueError(f"unrecognized conditioning model: {model_type}")
- conditioner = ConditioningProvider(conditioners, device=device, **condition_provider_args)
- return conditioner
-
-
-def get_condition_fuser(cfg: omegaconf.DictConfig) -> ConditionFuser:
- """Instantiate a condition fuser object.
- """
- fuser_cfg = getattr(cfg, "fuser")
- fuser_methods = ["sum", "cross", "prepend", "input_interpolate"]
- fuse2cond = {k: fuser_cfg[k] for k in fuser_methods}
- kwargs = {k: v for k, v in fuser_cfg.items() if k not in fuser_methods}
- fuser = ConditionFuser(fuse2cond=fuse2cond, **kwargs)
- return fuser
-
-
-def get_codebooks_pattern_provider(n_q: int, cfg: omegaconf.DictConfig) -> CodebooksPatternProvider:
- """Instantiate a codebooks pattern provider object.
- """
- pattern_providers = {
- 'parallel': ParallelPatternProvider,
- 'delay': DelayedPatternProvider,
- 'unroll': UnrolledPatternProvider,
- 'valle': VALLEPattern,
- 'musiclm': MusicLMPattern,
- }
- name = cfg.modeling
- kwargs = dict_from_config(cfg.get(name)) if hasattr(cfg, name) else {}
- klass = pattern_providers[name]
- return klass(n_q, **kwargs)
-
-
-def get_debug_compression_model(device='cpu'):
- """Instantiate a debug compression model to be used for unit tests.
- """
- seanet_kwargs = {
- 'n_filters': 4,
- 'n_residual_layers': 1,
- 'dimension': 32,
- 'ratios': [10, 8, 16] # 25 Hz at 32kHz
- }
- encoder = audiocraft.modules.SEANetEncoder(**seanet_kwargs)
- decoder = audiocraft.modules.SEANetDecoder(**seanet_kwargs)
- quantizer = qt.ResidualVectorQuantizer(dimension=32, bins=400, n_q=4)
- init_x = torch.randn(8, 32, 128)
- quantizer(init_x, 1) # initialize kmeans etc.
- compression_model = EncodecModel(
- encoder, decoder, quantizer,
- frame_rate=25, sample_rate=32000, channels=1).to(device)
- return compression_model.eval()
-
-
-def get_debug_lm_model(device='cpu'):
- """Instantiate a debug LM to be used for unit tests.
- """
- pattern = DelayedPatternProvider(n_q=4)
- dim = 16
- providers = {
- 'description': LUTConditioner(n_bins=128, dim=dim, output_dim=dim, tokenizer="whitespace"),
- }
- condition_provider = ConditioningProvider(providers)
- fuser = ConditionFuser(
- {'cross': ['description'], 'prepend': [],
- 'sum': [], 'input_interpolate': []})
- lm = LMModel(
- pattern, condition_provider, fuser,
- n_q=4, card=400, dim=dim, num_heads=4, custom=True, num_layers=2,
- cross_attention=True, causal=True)
- return lm.to(device).eval()
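
The two debug builders above are meant for unit tests; a plausible smoke test might look like the sketch below (treating `sample_rate`, `frame_rate` and `encode()` as public `EncodecModel` API is an assumption on our part):

```py
import torch

# A possible smoke test for the debug builders defined above.
cm = get_debug_compression_model()       # SEANet + RVQ, 25 Hz frames at 32 kHz
lm = get_debug_lm_model()                # tiny transformer LM over 4 codebooks
assert cm.sample_rate == 32000 and cm.frame_rate == 25
wav = torch.randn(1, 1, 32000)           # one second of mono audio
with torch.no_grad():
    codes, scale = cm.encode(wav)        # codes expected as [B, n_q, T] = [1, 4, 25]
```
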
diff --git a/spaces/Superlang/ImageProcessor/annotator/mlsd/models/mbv2_mlsd_large.py b/spaces/Superlang/ImageProcessor/annotator/mlsd/models/mbv2_mlsd_large.py
deleted file mode 100644
index 5b9799e7573ca41549b3c3b13ac47b906b369603..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/mlsd/models/mbv2_mlsd_large.py
+++ /dev/null
@@ -1,292 +0,0 @@
-import os
-import sys
-import torch
-import torch.nn as nn
-import torch.utils.model_zoo as model_zoo
-from torch.nn import functional as F
-
-
-class BlockTypeA(nn.Module):
- def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
- super(BlockTypeA, self).__init__()
- self.conv1 = nn.Sequential(
- nn.Conv2d(in_c2, out_c2, kernel_size=1),
- nn.BatchNorm2d(out_c2),
- nn.ReLU(inplace=True)
- )
- self.conv2 = nn.Sequential(
- nn.Conv2d(in_c1, out_c1, kernel_size=1),
- nn.BatchNorm2d(out_c1),
- nn.ReLU(inplace=True)
- )
- self.upscale = upscale
-
- def forward(self, a, b):
- b = self.conv1(b)
- a = self.conv2(a)
- if self.upscale:
- b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
- return torch.cat((a, b), dim=1)
-
-
-class BlockTypeB(nn.Module):
- def __init__(self, in_c, out_c):
- super(BlockTypeB, self).__init__()
- self.conv1 = nn.Sequential(
- nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
- nn.BatchNorm2d(in_c),
- nn.ReLU()
- )
- self.conv2 = nn.Sequential(
- nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
- nn.BatchNorm2d(out_c),
- nn.ReLU()
- )
-
- def forward(self, x):
- x = self.conv1(x) + x
- x = self.conv2(x)
- return x
-
-class BlockTypeC(nn.Module):
- def __init__(self, in_c, out_c):
- super(BlockTypeC, self).__init__()
- self.conv1 = nn.Sequential(
- nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
- nn.BatchNorm2d(in_c),
- nn.ReLU()
- )
- self.conv2 = nn.Sequential(
- nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
- nn.BatchNorm2d(in_c),
- nn.ReLU()
- )
- self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
-
- def forward(self, x):
- x = self.conv1(x)
- x = self.conv2(x)
- x = self.conv3(x)
- return x
-
-def _make_divisible(v, divisor, min_value=None):
- """
- This function is taken from the original tf repo.
- It ensures that all layers have a channel number that is divisible by 8
- It can be seen here:
- https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
- :param v:
- :param divisor:
- :param min_value:
- :return:
- """
- if min_value is None:
- min_value = divisor
- new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
- # Make sure that round down does not go down by more than 10%.
- if new_v < 0.9 * v:
- new_v += divisor
- return new_v
-
-
-class ConvBNReLU(nn.Sequential):
- def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
- self.channel_pad = out_planes - in_planes
- self.stride = stride
- #padding = (kernel_size - 1) // 2
-
- # TFLite uses slightly different padding than PyTorch
- if stride == 2:
- padding = 0
- else:
- padding = (kernel_size - 1) // 2
-
- super(ConvBNReLU, self).__init__(
- nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
- nn.BatchNorm2d(out_planes),
- nn.ReLU6(inplace=True)
- )
- self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
-
-
- def forward(self, x):
- # TFLite uses different padding
- if self.stride == 2:
- x = F.pad(x, (0, 1, 0, 1), "constant", 0)
- #print(x.shape)
-
- for module in self:
- if not isinstance(module, nn.MaxPool2d):
- x = module(x)
- return x
-
-
-class InvertedResidual(nn.Module):
- def __init__(self, inp, oup, stride, expand_ratio):
- super(InvertedResidual, self).__init__()
- self.stride = stride
- assert stride in [1, 2]
-
- hidden_dim = int(round(inp * expand_ratio))
- self.use_res_connect = self.stride == 1 and inp == oup
-
- layers = []
- if expand_ratio != 1:
- # pw
- layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
- layers.extend([
- # dw
- ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
- # pw-linear
- nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
- nn.BatchNorm2d(oup),
- ])
- self.conv = nn.Sequential(*layers)
-
- def forward(self, x):
- if self.use_res_connect:
- return x + self.conv(x)
- else:
- return self.conv(x)
-
-
-class MobileNetV2(nn.Module):
- def __init__(self, pretrained=True):
- """
- MobileNet V2 main class
- Args:
- num_classes (int): Number of classes
- width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
- inverted_residual_setting: Network structure
- round_nearest (int): Round the number of channels in each layer to be a multiple of this number
- Set to 1 to turn off rounding
- block: Module specifying inverted residual building block for mobilenet
- """
- super(MobileNetV2, self).__init__()
-
- block = InvertedResidual
- input_channel = 32
- last_channel = 1280
- width_mult = 1.0
- round_nearest = 8
-
- inverted_residual_setting = [
- # t, c, n, s
- [1, 16, 1, 1],
- [6, 24, 2, 2],
- [6, 32, 3, 2],
- [6, 64, 4, 2],
- [6, 96, 3, 1],
- #[6, 160, 3, 2],
- #[6, 320, 1, 1],
- ]
-
- # only check the first element, assuming user knows t,c,n,s are required
- if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
- raise ValueError("inverted_residual_setting should be non-empty "
- "or a 4-element list, got {}".format(inverted_residual_setting))
-
- # building first layer
- input_channel = _make_divisible(input_channel * width_mult, round_nearest)
- self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
- features = [ConvBNReLU(4, input_channel, stride=2)]
- # building inverted residual blocks
- for t, c, n, s in inverted_residual_setting:
- output_channel = _make_divisible(c * width_mult, round_nearest)
- for i in range(n):
- stride = s if i == 0 else 1
- features.append(block(input_channel, output_channel, stride, expand_ratio=t))
- input_channel = output_channel
-
- self.features = nn.Sequential(*features)
- self.fpn_selected = [1, 3, 6, 10, 13]
- # weight initialization
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- nn.init.kaiming_normal_(m.weight, mode='fan_out')
- if m.bias is not None:
- nn.init.zeros_(m.bias)
- elif isinstance(m, nn.BatchNorm2d):
- nn.init.ones_(m.weight)
- nn.init.zeros_(m.bias)
- elif isinstance(m, nn.Linear):
- nn.init.normal_(m.weight, 0, 0.01)
- nn.init.zeros_(m.bias)
- if pretrained:
- self._load_pretrained_model()
-
- def _forward_impl(self, x):
- # This exists since TorchScript doesn't support inheritance, so the superclass method
- # (this one) needs to have a name other than `forward` that can be accessed in a subclass
- fpn_features = []
- for i, f in enumerate(self.features):
- if i > self.fpn_selected[-1]:
- break
- x = f(x)
- if i in self.fpn_selected:
- fpn_features.append(x)
-
- c1, c2, c3, c4, c5 = fpn_features
- return c1, c2, c3, c4, c5
-
-
- def forward(self, x):
- return self._forward_impl(x)
-
- def _load_pretrained_model(self):
- pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
- model_dict = {}
- state_dict = self.state_dict()
- for k, v in pretrain_dict.items():
- if k in state_dict:
- model_dict[k] = v
- state_dict.update(model_dict)
- self.load_state_dict(state_dict)
-
-
-class MobileV2_MLSD_Large(nn.Module):
- def __init__(self):
- super(MobileV2_MLSD_Large, self).__init__()
-
- self.backbone = MobileNetV2(pretrained=False)
- ## A, B
- self.block15 = BlockTypeA(in_c1= 64, in_c2= 96,
- out_c1= 64, out_c2=64,
- upscale=False)
- self.block16 = BlockTypeB(128, 64)
-
- ## A, B
- self.block17 = BlockTypeA(in_c1 = 32, in_c2 = 64,
- out_c1= 64, out_c2= 64)
- self.block18 = BlockTypeB(128, 64)
-
- ## A, B
- self.block19 = BlockTypeA(in_c1=24, in_c2=64,
- out_c1=64, out_c2=64)
- self.block20 = BlockTypeB(128, 64)
-
- ## A, B, C
- self.block21 = BlockTypeA(in_c1=16, in_c2=64,
- out_c1=64, out_c2=64)
- self.block22 = BlockTypeB(128, 64)
-
- self.block23 = BlockTypeC(64, 16)
-
- def forward(self, x):
- c1, c2, c3, c4, c5 = self.backbone(x)
-
- x = self.block15(c4, c5)
- x = self.block16(x)
-
- x = self.block17(c3, x)
- x = self.block18(x)
-
- x = self.block19(c2, x)
- x = self.block20(x)
-
- x = self.block21(c1, x)
- x = self.block22(x)
- x = self.block23(x)
- x = x[:, 7:, :, :]
-
- return x
\ No newline at end of file
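
As a quick sanity check on the architecture above (a sketch, assuming only that `torch` is available): the stem expects a 4-channel input and the head keeps channels 7:16, so a 512x512 input should yield 9 feature maps at half the input resolution.

```py
import torch

model = MobileV2_MLSD_Large().eval()            # backbone uses pretrained=False, no download
with torch.no_grad():
    out = model(torch.randn(1, 4, 512, 512))    # RGB + ones channel, see utils.pred_lines
print(out.shape)                                # expected: torch.Size([1, 9, 256, 256])
```
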
diff --git a/spaces/Superlang/ImageProcessor/annotator/mlsd/utils.py b/spaces/Superlang/ImageProcessor/annotator/mlsd/utils.py
deleted file mode 100644
index a1de3bb95303b0d9906b0d8fa8a77f1d7d2d87ab..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/mlsd/utils.py
+++ /dev/null
@@ -1,582 +0,0 @@
-'''
-modified by lihaoweicv
-pytorch version
-'''
-
-'''
-M-LSD
-Copyright 2021-present NAVER Corp.
-Apache License v2.0
-'''
-
-import os
-import numpy as np
-import cv2
-import torch
-from torch.nn import functional as F
-
-
-def deccode_output_score_and_ptss(tpMap, topk_n=200, ksize=5):
- '''
- tpMap:
- center: tpMap[1, 0, :, :]
- displacement: tpMap[1, 1:5, :, :]
- '''
- b, c, h, w = tpMap.shape
- assert b == 1, 'only support bsize==1'
- displacement = tpMap[:, 1:5, :, :][0]
- center = tpMap[:, 0, :, :]
- heat = torch.sigmoid(center)
- hmax = F.max_pool2d(heat, (ksize, ksize), stride=1, padding=(ksize - 1) // 2)
- keep = (hmax == heat).float()
- heat = heat * keep
- heat = heat.reshape(-1, )
-
- scores, indices = torch.topk(heat, topk_n, dim=-1, largest=True)
- yy = torch.floor_divide(indices, w).unsqueeze(-1)
- xx = torch.fmod(indices, w).unsqueeze(-1)
- ptss = torch.cat((yy, xx), dim=-1)
-
- ptss = ptss.detach().cpu().numpy()
- scores = scores.detach().cpu().numpy()
- displacement = displacement.detach().cpu().numpy()
- displacement = displacement.transpose((1, 2, 0))
- return ptss, scores, displacement
-
-
-def pred_lines(image, model,
- input_shape=[512, 512],
- score_thr=0.10,
- dist_thr=20.0,
- device="cpu"):
- h, w, _ = image.shape
- h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]]
-
- resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA),
- np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
-
- resized_image = resized_image.transpose((2, 0, 1))
- batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
- batch_image = (batch_image / 127.5) - 1.0
-
- batch_image = torch.from_numpy(batch_image).float().to(device)
- outputs = model(batch_image)
- pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
- start = vmap[:, :, :2]
- end = vmap[:, :, 2:]
- dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
-
- segments_list = []
- for center, score in zip(pts, pts_score):
- y, x = center
- distance = dist_map[y, x]
- if score > score_thr and distance > dist_thr:
- disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
- x_start = x + disp_x_start
- y_start = y + disp_y_start
- x_end = x + disp_x_end
- y_end = y + disp_y_end
- segments_list.append([x_start, y_start, x_end, y_end])
-
- lines = 2 * np.array(segments_list) # 256 > 512
- lines[:, 0] = lines[:, 0] * w_ratio
- lines[:, 1] = lines[:, 1] * h_ratio
- lines[:, 2] = lines[:, 2] * w_ratio
- lines[:, 3] = lines[:, 3] * h_ratio
-
- return lines
-
-
-def pred_squares(image,
- model,
- input_shape=[512, 512],
- params={'score': 0.06,
- 'outside_ratio': 0.28,
- 'inside_ratio': 0.45,
- 'w_overlap': 0.0,
- 'w_degree': 1.95,
- 'w_length': 0.0,
- 'w_area': 1.86,
- 'w_center': 0.14}
- ,device="cpu"):
- '''
- shape = [height, width]
- '''
- h, w, _ = image.shape
- original_shape = [h, w]
-
- resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA),
- np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
- resized_image = resized_image.transpose((2, 0, 1))
- batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
- batch_image = (batch_image / 127.5) - 1.0
-
- batch_image = torch.from_numpy(batch_image).float().to(device)
- outputs = model(batch_image)
-
- pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
- start = vmap[:, :, :2] # (x, y)
- end = vmap[:, :, 2:] # (x, y)
- dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
-
- junc_list = []
- segments_list = []
- for junc, score in zip(pts, pts_score):
- y, x = junc
- distance = dist_map[y, x]
- if score > params['score'] and distance > 20.0:
- junc_list.append([x, y])
- disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
- d_arrow = 1.0
- x_start = x + d_arrow * disp_x_start
- y_start = y + d_arrow * disp_y_start
- x_end = x + d_arrow * disp_x_end
- y_end = y + d_arrow * disp_y_end
- segments_list.append([x_start, y_start, x_end, y_end])
-
- segments = np.array(segments_list)
-
- ####### post processing for squares
- # 1. get unique lines
- point = np.array([[0, 0]])
- point = point[0]
- start = segments[:, :2]
- end = segments[:, 2:]
- diff = start - end
- a = diff[:, 1]
- b = -diff[:, 0]
- c = a * start[:, 0] + b * start[:, 1]
-
- d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10)
- theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi
- theta[theta < 0.0] += 180
- hough = np.concatenate([d[:, None], theta[:, None]], axis=-1)
-
- d_quant = 1
- theta_quant = 2
- hough[:, 0] //= d_quant
- hough[:, 1] //= theta_quant
- _, indices, counts = np.unique(hough, axis=0, return_index=True, return_counts=True)
-
- acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32')
- idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1
- yx_indices = hough[indices, :].astype('int32')
- acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts
- idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices
-
- acc_map_np = acc_map
- # acc_map = acc_map[None, :, :, None]
- #
- # ### fast suppression using tensorflow op
- # acc_map = tf.constant(acc_map, dtype=tf.float32)
- # max_acc_map = tf.keras.layers.MaxPool2D(pool_size=(5, 5), strides=1, padding='same')(acc_map)
- # acc_map = acc_map * tf.cast(tf.math.equal(acc_map, max_acc_map), tf.float32)
- # flatten_acc_map = tf.reshape(acc_map, [1, -1])
- # topk_values, topk_indices = tf.math.top_k(flatten_acc_map, k=len(pts))
- # _, h, w, _ = acc_map.shape
- # y = tf.expand_dims(topk_indices // w, axis=-1)
- # x = tf.expand_dims(topk_indices % w, axis=-1)
- # yx = tf.concat([y, x], axis=-1)
-
- ### fast suppression using pytorch op
- acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0)
- _, _, h, w = acc_map.shape
- max_acc_map = F.max_pool2d(acc_map, kernel_size=5, stride=1, padding=2)
- acc_map = acc_map * ((acc_map == max_acc_map).float())
- flatten_acc_map = acc_map.reshape([-1, ])
-
- scores, indices = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True)
- yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1)
- xx = torch.fmod(indices, w).unsqueeze(-1)
- yx = torch.cat((yy, xx), dim=-1)
-
- yx = yx.detach().cpu().numpy()
-
- topk_values = scores.detach().cpu().numpy()
- indices = idx_map[yx[:, 0], yx[:, 1]]
- basis = 5 // 2
-
- merged_segments = []
- for yx_pt, max_indice, value in zip(yx, indices, topk_values):
- y, x = yx_pt
- if max_indice == -1 or value == 0:
- continue
- segment_list = []
- for y_offset in range(-basis, basis + 1):
- for x_offset in range(-basis, basis + 1):
- indice = idx_map[y + y_offset, x + x_offset]
- cnt = int(acc_map_np[y + y_offset, x + x_offset])
- if indice != -1:
- segment_list.append(segments[indice])
- if cnt > 1:
- check_cnt = 1
- current_hough = hough[indice]
- for new_indice, new_hough in enumerate(hough):
- if (current_hough == new_hough).all() and indice != new_indice:
- segment_list.append(segments[new_indice])
- check_cnt += 1
- if check_cnt == cnt:
- break
- group_segments = np.array(segment_list).reshape([-1, 2])
- sorted_group_segments = np.sort(group_segments, axis=0)
- x_min, y_min = sorted_group_segments[0, :]
- x_max, y_max = sorted_group_segments[-1, :]
-
- deg = theta[max_indice]
- if deg >= 90:
- merged_segments.append([x_min, y_max, x_max, y_min])
- else:
- merged_segments.append([x_min, y_min, x_max, y_max])
-
- # 2. get intersections
- new_segments = np.array(merged_segments) # (x1, y1, x2, y2)
- start = new_segments[:, :2] # (x1, y1)
- end = new_segments[:, 2:] # (x2, y2)
- new_centers = (start + end) / 2.0
- diff = start - end
- dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1))
-
- # ax + by = c
- a = diff[:, 1]
- b = -diff[:, 0]
- c = a * start[:, 0] + b * start[:, 1]
- pre_det = a[:, None] * b[None, :]
- det = pre_det - np.transpose(pre_det)
-
- pre_inter_y = a[:, None] * c[None, :]
- inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10)
- pre_inter_x = c[:, None] * b[None, :]
- inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10)
- inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32')
-
- # 3. get corner information
- # 3.1 get distance
- '''
- dist_segments:
- | dist(0), dist(1), dist(2), ...|
- dist_inter_to_segment1:
- | dist(inter,0), dist(inter,0), dist(inter,0), ... |
- | dist(inter,1), dist(inter,1), dist(inter,1), ... |
- ...
-    dist_inter_to_segment2:
- | dist(inter,0), dist(inter,1), dist(inter,2), ... |
- | dist(inter,0), dist(inter,1), dist(inter,2), ... |
- ...
- '''
-
- dist_inter_to_segment1_start = np.sqrt(
- np.sum(((inter_pts - start[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
- dist_inter_to_segment1_end = np.sqrt(
- np.sum(((inter_pts - end[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
- dist_inter_to_segment2_start = np.sqrt(
- np.sum(((inter_pts - start[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
- dist_inter_to_segment2_end = np.sqrt(
- np.sum(((inter_pts - end[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
-
- # sort ascending
- dist_inter_to_segment1 = np.sort(
- np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1),
- axis=-1) # [n_batch, n_batch, 2]
- dist_inter_to_segment2 = np.sort(
- np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1),
- axis=-1) # [n_batch, n_batch, 2]
-
- # 3.2 get degree
- inter_to_start = new_centers[:, None, :] - inter_pts
- deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi
- deg_inter_to_start[deg_inter_to_start < 0.0] += 360
- inter_to_end = new_centers[None, :, :] - inter_pts
- deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi
- deg_inter_to_end[deg_inter_to_end < 0.0] += 360
-
- '''
- B -- G
- | |
- C -- R
- B : blue / G: green / C: cyan / R: red
-
- 0 -- 1
- | |
- 3 -- 2
- '''
- # rename variables
- deg1_map, deg2_map = deg_inter_to_start, deg_inter_to_end
- # sort deg ascending
- deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1)
-
- deg_diff_map = np.abs(deg1_map - deg2_map)
- # we only consider the smallest degree of intersect
- deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180]
-
- # define available degree range
- deg_range = [60, 120]
-
- corner_dict = {corner_info: [] for corner_info in range(4)}
- inter_points = []
- for i in range(inter_pts.shape[0]):
- for j in range(i + 1, inter_pts.shape[1]):
- # i, j > line index, always i < j
- x, y = inter_pts[i, j, :]
- deg1, deg2 = deg_sort[i, j, :]
- deg_diff = deg_diff_map[i, j]
-
- check_degree = deg_diff > deg_range[0] and deg_diff < deg_range[1]
-
- outside_ratio = params['outside_ratio'] # over ratio >>> drop it!
- inside_ratio = params['inside_ratio'] # over ratio >>> drop it!
- check_distance = ((dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and \
- dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio) or \
- (dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and \
- dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and \
- ((dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and \
- dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio) or \
- (dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and \
- dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio))
-
- if check_degree and check_distance:
- corner_info = None
-
- if (deg1 >= 0 and deg1 <= 45 and deg2 >= 45 and deg2 <= 120) or \
- (deg2 >= 315 and deg1 >= 45 and deg1 <= 120):
- corner_info, color_info = 0, 'blue'
- elif (deg1 >= 45 and deg1 <= 125 and deg2 >= 125 and deg2 <= 225):
- corner_info, color_info = 1, 'green'
- elif (deg1 >= 125 and deg1 <= 225 and deg2 >= 225 and deg2 <= 315):
- corner_info, color_info = 2, 'black'
- elif (deg1 >= 0 and deg1 <= 45 and deg2 >= 225 and deg2 <= 315) or \
- (deg2 >= 315 and deg1 >= 225 and deg1 <= 315):
- corner_info, color_info = 3, 'cyan'
- else:
- corner_info, color_info = 4, 'red' # we don't use it
- continue
-
- corner_dict[corner_info].append([x, y, i, j])
- inter_points.append([x, y])
-
- square_list = []
- connect_list = []
- segments_list = []
- for corner0 in corner_dict[0]:
- for corner1 in corner_dict[1]:
- connect01 = False
- for corner0_line in corner0[2:]:
- if corner0_line in corner1[2:]:
- connect01 = True
- break
- if connect01:
- for corner2 in corner_dict[2]:
- connect12 = False
- for corner1_line in corner1[2:]:
- if corner1_line in corner2[2:]:
- connect12 = True
- break
- if connect12:
- for corner3 in corner_dict[3]:
- connect23 = False
- for corner2_line in corner2[2:]:
- if corner2_line in corner3[2:]:
- connect23 = True
- break
- if connect23:
- for corner3_line in corner3[2:]:
- if corner3_line in corner0[2:]:
- # SQUARE!!!
- '''
- 0 -- 1
- | |
- 3 -- 2
- square_list:
- order: 0 > 1 > 2 > 3
- | x0, y0, x1, y1, x2, y2, x3, y3 |
- | x0, y0, x1, y1, x2, y2, x3, y3 |
- ...
- connect_list:
- order: 01 > 12 > 23 > 30
- | line_idx01, line_idx12, line_idx23, line_idx30 |
- | line_idx01, line_idx12, line_idx23, line_idx30 |
- ...
- segments_list:
- order: 0 > 1 > 2 > 3
- | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
- | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
- ...
- '''
- square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2])
- connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line])
- segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:])
-
- def check_outside_inside(segments_info, connect_idx):
- # return 'outside or inside', min distance, cover_param, peri_param
- if connect_idx == segments_info[0]:
- check_dist_mat = dist_inter_to_segment1
- else:
- check_dist_mat = dist_inter_to_segment2
-
- i, j = segments_info
- min_dist, max_dist = check_dist_mat[i, j, :]
- connect_dist = dist_segments[connect_idx]
- if max_dist > connect_dist:
- return 'outside', min_dist, 0, 1
- else:
- return 'inside', min_dist, -1, -1
-
- top_square = None
-
- try:
- map_size = input_shape[0] / 2
- squares = np.array(square_list).reshape([-1, 4, 2])
- score_array = []
- connect_array = np.array(connect_list)
- segments_array = np.array(segments_list).reshape([-1, 4, 2])
-
- # get degree of corners:
- squares_rollup = np.roll(squares, 1, axis=1)
- squares_rolldown = np.roll(squares, -1, axis=1)
- vec1 = squares_rollup - squares
- normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10)
- vec2 = squares_rolldown - squares
- normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10)
- inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1) # [n_squares, 4]
- squares_degree = np.arccos(inner_products) * 180 / np.pi # [n_squares, 4]
-
- # get square score
- overlap_scores = []
- degree_scores = []
- length_scores = []
-
- for connects, segments, square, degree in zip(connect_array, segments_array, squares, squares_degree):
- '''
- 0 -- 1
- | |
- 3 -- 2
-
- # segments: [4, 2]
- # connects: [4]
- '''
-
- ###################################### OVERLAP SCORES
- cover = 0
- perimeter = 0
- # check 0 > 1 > 2 > 3
- square_length = []
-
- for start_idx in range(4):
- end_idx = (start_idx + 1) % 4
-
- connect_idx = connects[start_idx] # segment idx of segment01
- start_segments = segments[start_idx]
- end_segments = segments[end_idx]
-
- start_point = square[start_idx]
- end_point = square[end_idx]
-
- # check whether outside or inside
- start_position, start_min, start_cover_param, start_peri_param = check_outside_inside(start_segments,
- connect_idx)
- end_position, end_min, end_cover_param, end_peri_param = check_outside_inside(end_segments, connect_idx)
-
- cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min
- perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min
-
- square_length.append(
- dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min)
-
- overlap_scores.append(cover / perimeter)
- ######################################
- ###################################### DEGREE SCORES
- '''
- deg0 vs deg2
- deg1 vs deg3
- '''
- deg0, deg1, deg2, deg3 = degree
- deg_ratio1 = deg0 / deg2
- if deg_ratio1 > 1.0:
- deg_ratio1 = 1 / deg_ratio1
- deg_ratio2 = deg1 / deg3
- if deg_ratio2 > 1.0:
- deg_ratio2 = 1 / deg_ratio2
- degree_scores.append((deg_ratio1 + deg_ratio2) / 2)
- ######################################
- ###################################### LENGTH SCORES
- '''
- len0 vs len2
- len1 vs len3
- '''
- len0, len1, len2, len3 = square_length
- len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0
- len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1
- length_scores.append((len_ratio1 + len_ratio2) / 2)
-
- ######################################
-
- overlap_scores = np.array(overlap_scores)
- overlap_scores /= np.max(overlap_scores)
-
- degree_scores = np.array(degree_scores)
- # degree_scores /= np.max(degree_scores)
-
- length_scores = np.array(length_scores)
-
- ###################################### AREA SCORES
- area_scores = np.reshape(squares, [-1, 4, 2])
- area_x = area_scores[:, :, 0]
- area_y = area_scores[:, :, 1]
- correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0]
- area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1)
- area_scores = 0.5 * np.abs(area_scores + correction)
- area_scores /= (map_size * map_size) # np.max(area_scores)
- ######################################
-
- ###################################### CENTER SCORES
- centers = np.array([[256 // 2, 256 // 2]], dtype='float32') # [1, 2]
- # squares: [n, 4, 2]
- square_centers = np.mean(squares, axis=1) # [n, 2]
- center2center = np.sqrt(np.sum((centers - square_centers) ** 2))
- center_scores = center2center / (map_size / np.sqrt(2.0))
-
- '''
- score_w = [overlap, degree, area, center, length]
- '''
- score_w = [0.0, 1.0, 10.0, 0.5, 1.0]
- score_array = params['w_overlap'] * overlap_scores \
- + params['w_degree'] * degree_scores \
- + params['w_area'] * area_scores \
- - params['w_center'] * center_scores \
- + params['w_length'] * length_scores
-
- best_square = []
-
- sorted_idx = np.argsort(score_array)[::-1]
- score_array = score_array[sorted_idx]
- squares = squares[sorted_idx]
-
- except Exception as e:
- pass
-
- '''return list
- merged_lines, squares, scores
- '''
-
- try:
- new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1]
- new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0]
- new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1]
- new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0]
- except:
- new_segments = []
-
- try:
- squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1]
- squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0]
- except:
- squares = []
- score_array = []
-
- try:
- inter_points = np.array(inter_points)
- inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1]
- inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0]
- except:
- inter_points = []
-
- return new_segments, squares, score_array, inter_points
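
A hedged usage sketch tying `pred_lines` above to the `MobileV2_MLSD_Large` backbone from the previous file; the checkpoint path and image file are placeholders:

```py
import cv2
import torch

model = MobileV2_MLSD_Large()
state = torch.load("mlsd_large_512_fp32.pth", map_location="cpu")  # placeholder path
model.load_state_dict(state)
model.eval()

image = cv2.imread("room.jpg")                  # H x W x 3 (BGR), placeholder file
with torch.no_grad():
    lines = pred_lines(image, model, input_shape=[512, 512],
                       score_thr=0.10, dist_thr=20.0, device="cpu")
# each row of `lines` is [x_start, y_start, x_end, y_end] in original image coordinates
```
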
diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/__init__.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/__init__.py
deleted file mode 100644
index bdd994b49294485c27610772f97f177741f5518f..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-from .utils.env import setup_environment
-
-setup_environment()
-
-
-# This line will be programmatically read/written by setup.py.
-# Leave them at the bottom of this file and don't touch them.
-__version__ = "0.6"
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/ann_r50-d8.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/ann_r50-d8.py
deleted file mode 100644
index a2cb653827e44e6015b3b83bc578003e614a6aa1..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/ann_r50-d8.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='ANNHead',
- in_channels=[1024, 2048],
- in_index=[2, 3],
- channels=512,
- project_channels=256,
- query_scales=(1, ),
- key_pool_scales=(1, 3, 6, 8),
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
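
A sketch of how a `_base_` model file like the one above is normally consumed by an mmsegmentation-style config; the dataset and schedule paths are illustrative placeholders, not files from this repository:

```py
# e.g. configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py (hypothetical path)
_base_ = [
    '../_base_/models/ann_r50-d8.py',
    '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py',
]
# per-experiment overrides are merged on top of the base model dict
model = dict(decode_head=dict(num_classes=19), auxiliary_head=dict(num_classes=19))
```
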
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/bricks/padding.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/bricks/padding.py
deleted file mode 100644
index e4ac6b28a1789bd551c613a7d3e7b622433ac7ec..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/bricks/padding.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch.nn as nn
-
-from .registry import PADDING_LAYERS
-
-PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d)
-PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d)
-PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d)
-
-
-def build_padding_layer(cfg, *args, **kwargs):
- """Build padding layer.
-
- Args:
- cfg (None or dict): The padding layer config, which should contain:
- - type (str): Layer type.
- - layer args: Args needed to instantiate a padding layer.
-
- Returns:
- nn.Module: Created padding layer.
- """
- if not isinstance(cfg, dict):
- raise TypeError('cfg must be a dict')
- if 'type' not in cfg:
- raise KeyError('the cfg dict must contain the key "type"')
-
- cfg_ = cfg.copy()
- padding_type = cfg_.pop('type')
- if padding_type not in PADDING_LAYERS:
- raise KeyError(f'Unrecognized padding type {padding_type}.')
- else:
- padding_layer = PADDING_LAYERS.get(padding_type)
-
- layer = padding_layer(*args, **kwargs, **cfg_)
-
- return layer
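
For reference, a minimal usage sketch of `build_padding_layer`: the `type` key selects the registered layer and the remaining arguments are forwarded to its constructor.

```py
import torch

pad = build_padding_layer(dict(type='reflect'), 2)   # -> nn.ReflectionPad2d(2)
x = torch.randn(1, 3, 8, 8)
print(pad(x).shape)                                  # torch.Size([1, 3, 12, 12])
```
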
diff --git a/spaces/Tartan-Ishan/Expression_Classifier/backups/app_v1.py b/spaces/Tartan-Ishan/Expression_Classifier/backups/app_v1.py
deleted file mode 100644
index 792eda36764751af0be4c182b601eb00112f386e..0000000000000000000000000000000000000000
--- a/spaces/Tartan-Ishan/Expression_Classifier/backups/app_v1.py
+++ /dev/null
@@ -1,32 +0,0 @@
-#|export
-
-from fastai.vision.all import *
-import gradio as gr
-
-#|export
-# import pathlib
-# temp = pathlib.PosixPath
-# pathlib.PosixPath = pathlib.WindowsPath
-import pathlib
-import platform
-plt = platform.system()
-if plt == 'Linux': pathlib.WindowsPath = pathlib.PosixPath
-
-
-#|export
-learn = load_learner('resnet18_emotion_detection1.pkl')
-
-#|export
-
-categories = ('Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise')
-
-def classify_image(img):
- pred, idx, probs = learn.predict(img)
- return dict(zip(categories, map(float, probs)))
-
-#|export
-image = gr.inputs.Image(shape=(256,256))
-label = gr.outputs.Label()
-examples = ['angry.jpg','disgust.jpg', 'fear.jpg', 'happy.jpg', 'neutral.jpg', 'sad.jpg', 'surprise.jpg']
-
-intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)
-intf.launch()
\ No newline at end of file
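
The file above uses the long-deprecated `gr.inputs`/`gr.outputs` modules; a sketch of the same interface against the current Gradio component API (assuming `classify_image` and the example images exist as in the file above) would be:

```py
import gradio as gr

demo = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(),          # replaces gr.inputs.Image(shape=(256, 256))
    outputs=gr.Label(),         # replaces gr.outputs.Label()
    examples=['angry.jpg', 'happy.jpg', 'neutral.jpg'],
)
demo.launch()
```
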
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py
deleted file mode 100644
index df7a2aedf480ed8dc4aa3645e37420e9b893fae4..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import detectron2.data.transforms as T
-from detectron2.config.lazy import LazyCall as L
-from detectron2.layers.batch_norm import NaiveSyncBatchNorm
-from detectron2.solver import WarmupParamScheduler
-from fvcore.common.param_scheduler import MultiStepParamScheduler
-
-from ..common.data.coco import dataloader
-from ..common.models.mask_rcnn_fpn import model
-from ..common.optim import SGD as optimizer
-from ..common.train import train
-
-# train from scratch
-train.init_checkpoint = ""
-train.amp.enabled = True
-train.ddp.fp16_compression = True
-model.backbone.bottom_up.freeze_at = 0
-
-# SyncBN
-# fmt: off
-model.backbone.bottom_up.stem.norm = \
- model.backbone.bottom_up.stages.norm = \
- model.backbone.norm = "SyncBN"
-
-# Using NaiveSyncBatchNorm because heads may have empty input. That is not supported by
-# torch.nn.SyncBatchNorm. We can remove this after
-# https://github.com/pytorch/pytorch/issues/36530 is fixed.
-model.roi_heads.box_head.conv_norm = \
- model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c,
- stats_mode="N")
-# fmt: on
-
-# 2conv in RPN:
-# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950
-model.proposal_generator.head.conv_dims = [-1, -1]
-
-# 4conv1fc box head
-model.roi_heads.box_head.conv_dims = [256, 256, 256, 256]
-model.roi_heads.box_head.fc_dims = [1024]
-
-# resize_and_crop_image in:
-# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950
-image_size = 1024
-dataloader.train.mapper.augmentations = [
- L(T.ResizeScale)(
- min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size
- ),
- L(T.FixedSizeCrop)(crop_size=(image_size, image_size)),
- L(T.RandomFlip)(horizontal=True),
-]
-
-# recompute boxes due to cropping
-dataloader.train.mapper.recompute_boxes = True
-
-# larger batch-size.
-dataloader.train.total_batch_size = 64
-
-# Equivalent to 100 epochs.
-# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep
-train.max_iter = 184375
-
-lr_multiplier = L(WarmupParamScheduler)(
- scheduler=L(MultiStepParamScheduler)(
- values=[1.0, 0.1, 0.01],
- milestones=[163889, 177546],
- num_updates=train.max_iter,
- ),
- warmup_length=500 / train.max_iter,
- warmup_factor=0.067,
-)
-
-optimizer.lr = 0.1
-optimizer.weight_decay = 4e-5
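
A sketch of how a LazyCall-based config like the one above is typically consumed with detectron2's `LazyConfig` utilities; the file path is a placeholder and the optimizer wiring follows the usual lazyconfig training pattern:

```py
from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py")  # placeholder path
model = instantiate(cfg.model)            # builds the Mask R-CNN described by cfg.model
cfg.optimizer.params.model = model        # the SGD config needs the model before instantiation
optimizer = instantiate(cfg.optimizer)
train_loader = instantiate(cfg.dataloader.train)
```
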
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_roi_heads.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_roi_heads.py
deleted file mode 100644
index 90fadf1a9667cf836223945b22c5147b89ad98a4..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_roi_heads.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import numpy as np
-import json
-import math
-import torch
-from torch import nn
-from torch.autograd.function import Function
-from typing import Dict, List, Optional, Tuple, Union
-
-from detectron2.layers import ShapeSpec
-from detectron2.structures import Boxes, Instances, pairwise_iou
-from detectron2.utils.events import get_event_storage
-
-from detectron2.modeling.box_regression import Box2BoxTransform
-from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
-from detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
-from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads
-from detectron2.modeling.roi_heads.box_head import build_box_head
-from .custom_fast_rcnn import CustomFastRCNNOutputLayers
-
-
-@ROI_HEADS_REGISTRY.register()
-class CustomROIHeads(StandardROIHeads):
- @classmethod
- def _init_box_head(self, cfg, input_shape):
- ret = super()._init_box_head(cfg, input_shape)
- del ret['box_predictor']
- ret['box_predictor'] = CustomFastRCNNOutputLayers(
- cfg, ret['box_head'].output_shape)
- self.debug = cfg.DEBUG
- if self.debug:
- self.debug_show_name = cfg.DEBUG_SHOW_NAME
- self.save_debug = cfg.SAVE_DEBUG
- self.vis_thresh = cfg.VIS_THRESH
- self.pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(
- torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1)
- self.pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(
- torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1)
- return ret
-
- def forward(self, images, features, proposals, targets=None):
- """
- enable debug
- """
- if not self.debug:
- del images
- if self.training:
- assert targets
- proposals = self.label_and_sample_proposals(proposals, targets)
- del targets
-
- if self.training:
- losses = self._forward_box(features, proposals)
- losses.update(self._forward_mask(features, proposals))
- losses.update(self._forward_keypoint(features, proposals))
- return proposals, losses
- else:
- pred_instances = self._forward_box(features, proposals)
- pred_instances = self.forward_with_given_boxes(features, pred_instances)
- if self.debug:
- from ..debug import debug_second_stage
- denormalizer = lambda x: x * self.pixel_std + self.pixel_mean
- debug_second_stage(
- [denormalizer(images[0].clone())],
- pred_instances, proposals=proposals,
- debug_show_name=self.debug_show_name)
- return pred_instances, {}
-
-
-@ROI_HEADS_REGISTRY.register()
-class CustomCascadeROIHeads(CascadeROIHeads):
- @classmethod
- def _init_box_head(self, cfg, input_shape):
- self.mult_proposal_score = cfg.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE
- ret = super()._init_box_head(cfg, input_shape)
- del ret['box_predictors']
- cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
- box_predictors = []
- for box_head, bbox_reg_weights in zip(ret['box_heads'], cascade_bbox_reg_weights):
- box_predictors.append(
- CustomFastRCNNOutputLayers(
- cfg, box_head.output_shape,
- box2box_transform=Box2BoxTransform(weights=bbox_reg_weights)
- ))
- ret['box_predictors'] = box_predictors
- self.debug = cfg.DEBUG
- if self.debug:
- self.debug_show_name = cfg.DEBUG_SHOW_NAME
- self.save_debug = cfg.SAVE_DEBUG
- self.vis_thresh = cfg.VIS_THRESH
- self.pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(
- torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1)
- self.pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(
- torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1)
- return ret
-
-
- def _forward_box(self, features, proposals, targets=None):
- """
- Add mult proposal scores at testing
- """
- if (not self.training) and self.mult_proposal_score:
- if len(proposals) > 0 and proposals[0].has('scores'):
- proposal_scores = [
- p.get('scores') for p in proposals]
- else:
- proposal_scores = [
- p.get('objectness_logits') for p in proposals]
-
- features = [features[f] for f in self.box_in_features]
- head_outputs = [] # (predictor, predictions, proposals)
- prev_pred_boxes = None
- image_sizes = [x.image_size for x in proposals]
- for k in range(self.num_cascade_stages):
- if k > 0:
- proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes)
- if self.training:
- proposals = self._match_and_label_boxes(proposals, k, targets)
- predictions = self._run_stage(features, proposals, k)
- prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals)
- head_outputs.append((self.box_predictor[k], predictions, proposals))
-
- if self.training:
- losses = {}
- storage = get_event_storage()
- for stage, (predictor, predictions, proposals) in enumerate(head_outputs):
- with storage.name_scope("stage{}".format(stage)):
- stage_losses = predictor.losses(predictions, proposals)
- losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()})
- return losses
- else:
- # Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1)
- scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]
- scores = [
- sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
- for scores_per_image in zip(*scores_per_stage)
- ]
-
- if self.mult_proposal_score:
- scores = [(s * ps[:, None]) ** 0.5 \
- for s, ps in zip(scores, proposal_scores)]
-
- predictor, predictions, proposals = head_outputs[-1]
- boxes = predictor.predict_boxes(predictions, proposals)
- pred_instances, _ = fast_rcnn_inference(
- boxes,
- scores,
- image_sizes,
- predictor.test_score_thresh,
- predictor.test_nms_thresh,
- predictor.test_topk_per_image,
- )
-
- return pred_instances
-
- def forward(self, images, features, proposals, targets=None):
- '''
- enable debug
- '''
- if not self.debug:
- del images
- if self.training:
- proposals = self.label_and_sample_proposals(proposals, targets)
-
- if self.training:
- losses = self._forward_box(features, proposals, targets)
- losses.update(self._forward_mask(features, proposals))
- losses.update(self._forward_keypoint(features, proposals))
- return proposals, losses
- else:
- # import pdb; pdb.set_trace()
- pred_instances = self._forward_box(features, proposals)
- pred_instances = self.forward_with_given_boxes(features, pred_instances)
- if self.debug:
- from ..debug import debug_second_stage
- denormalizer = lambda x: x * self.pixel_std + self.pixel_mean
- debug_second_stage(
- [denormalizer(x.clone()) for x in images],
- pred_instances, proposals=proposals,
- save_debug=self.save_debug,
- debug_show_name=self.debug_show_name,
- vis_thresh=self.vis_thresh)
- return pred_instances, {}
-
-
diff --git a/spaces/Th3BossC/TranscriptApi/TranscriptApi/common/utils.py b/spaces/Th3BossC/TranscriptApi/TranscriptApi/common/utils.py
deleted file mode 100644
index 7c4379f36eb83075499056f6d05187e78827ce54..0000000000000000000000000000000000000000
--- a/spaces/Th3BossC/TranscriptApi/TranscriptApi/common/utils.py
+++ /dev/null
@@ -1,218 +0,0 @@
-import os
-import librosa
-import soundfile as sf
-from pytube import YouTube
-import urllib.parse as urlparse
-from moviepy.editor import VideoFileClip
-import shutil
-import whisper
-import torch
-from transformers import pipeline
-from tqdm.auto import tqdm
-from PyPDF2 import PdfReader
-
-
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-# device = 'cpu'
-
-
-checkpoint = 'Th3BossC/SummarizationModel_t5-small_opeai_tldr'
-
-
-
-
-
-
-############### video queries ###############
-def title(video_id):
- return YouTube('https://www.youtube.com/watch?v=' + video_id).title
-
-def get_video_id(video_url):
-    url_data = urlparse.urlparse(video_url)
- query = urlparse.parse_qs(url_data.query)
- video = query["v"][0]
- return video
-
-def get_video(video_url, location, filename = 'audio'):
- if not os.path.exists(location):
- os.makedirs(location)
- video_filename = location + filename + '.mp4'
- audio_filename = location + filename + '.mp3'
- print('[INFO] downloading video...')
- video = YouTube(video_url).streams.filter(file_extension = 'mp4').first().download(filename = video_filename)
- video = VideoFileClip(video_filename)
- print('[INFO] extracting audio from video...')
- video.audio.write_audiofile(audio_filename)
- #os.remove(video_filename)
-
- return audio_filename
-
-############################################################
-
-
-############### Audio ###############
-def chunk_audio(filename, segment_length, output_dir):
- if not os.path.isdir(output_dir):
- os.mkdir(output_dir)
- audio, sr = librosa.load(filename, sr = 44100)
- duration = librosa.get_duration(y = audio, sr = sr)
- num_segments = int(duration / segment_length) + 1
- print(f'[INFO] Chunking {num_segments} chunks...')
-
- audio_files = []
-
- for i in range(num_segments):
- start = i*segment_length*sr
- end = (i+1)*segment_length*sr
- segment = audio[start:end]
- sf.write(os.path.join(output_dir, f"segment_{i}.mp3"), segment, sr)
- audio_files.append(output_dir + f'segment_{i}.mp3')
-
- print(audio_files)
- #os.remove(filename)
- return audio_files
-
-def transcribe_audio(audio_files, output_file = None, model = whisper.load_model('base', device = device)):
- print('[INFO] converting audio to text...')
- transcripts = []
- model.to(device)
- for audio_file in audio_files:
- response = model.transcribe(audio_file)
- transcripts.append(response['text'])
-
- if output_file is not None:
- with open(output_file, 'w') as f:
- for transcript in transcripts:
- f.write(transcript + '\n')
-
- return transcripts
-
-############################################################
-
-
-############################################################
-
-############### Compile all functions ###############
-def summarize_youtube_video(video_url, outputs_dir):
- print(f'[INFO] running on {device}')
- raw_audio_dir = f'{outputs_dir}/raw_audio/'
- chunks_dir = f'{outputs_dir}/chunks/'
- transcripts_file = f'{outputs_dir}/transcripts.txt'
- summary_file = f'{outputs_dir}/summary.txt'
- segment_length = 60*10
-
- if os.path.exists(outputs_dir):
- shutil.rmtree(outputs_dir)
- os.mkdir(outputs_dir)
-
- audio_filename = get_video(video_url, raw_audio_dir)
- chunked_audio_files = chunk_audio(audio_filename, segment_length, chunks_dir)
- transcriptions = transcribe_audio(chunked_audio_files, transcripts_file)
-
-
- # splitting transcription into sentences
- sentences = []
- for transcript in transcriptions:
- sentences += transcript.split('.')
-
- sentences_len = [len(sentence) for sentence in sentences]
- sentence_mean_length = sum(sentences_len) // len(sentences_len)
-
- num_sentences_per_step = int(1600 / (sentence_mean_length))
- num_steps = (len(sentences) // num_sentences_per_step) + (len(sentences) % num_sentences_per_step != 0)
-
- print(f"""
- [INFO] sentences_len : {len(sentences_len)}
- [INFO] sentence_mean_length : {sentence_mean_length},
- [INFO] num_sentences_per_step : {num_sentences_per_step},
- [INFO] num_steps : {num_steps}
- """)
-
- summarizer = pipeline('summarization', model = checkpoint, tokenizer = checkpoint, max_length = 200, truncation = True)
-
- summaries = []
-
- for i in tqdm(range(num_steps)):
- chunk = ' '.join(sentences[num_sentences_per_step*i : num_sentences_per_step*(i+1)])
- summary = summarizer(chunk, do_sample = False)[0]['summary_text']
- summaries.append(summary)
-
- complete_summary = ' '.join(summaries)
- with open(summary_file, 'w') as f:
- f.write(complete_summary)
-
- with open(transcripts_file, 'r') as f:
- complete_transcript = f.read()
- return {'transcript': complete_transcript, 'summary' : complete_summary}
-############################################################
-
-
-
-############ File Summarize ############
-
-def extract_text_pdf(file_location = 'TranscriptApi/static/files/temp.pdf'):
- reader = PdfReader(file_location)
- text = ""
- for page in reader.pages:
- text += page.extract_text()
-    return text
-
-def extract_text_txt(file_location = 'TranscriptApi/static/files/temp.txt'):
- with open(file_location, "r") as f:
- text = f.read()
- return text
-
-
-
-
-def summarize_string(text : str):
- sentences = text.split('.')
-
- summarizer = pipeline('summarization', model = checkpoint, tokenizer = checkpoint, max_length = 200, truncation = True, device = 0)
-
- sentences_len = [len(sentence) for sentence in sentences]
- sentence_mean_length = sum(sentences_len) // len(sentences_len)
-
- num_sentences_per_step = int(1600 / (sentence_mean_length))
- num_steps = (len(sentences) // num_sentences_per_step) + (len(sentences) % num_sentences_per_step != 0)
-
- print(f"""
- [INFO] sentences_len : {len(sentences_len)}
- [INFO] sentence_mean_length : {sentence_mean_length},
- [INFO] num_sentences_per_step : {num_sentences_per_step},
- [INFO] num_steps : {num_steps}
- """)
-
-
- summaries = []
- for i in tqdm(range(num_steps)):
- chunk = ' '.join(sentences[num_sentences_per_step*i : num_sentences_per_step*(i+1)])
- summary = summarizer(chunk, do_sample = False)[0]['summary_text']
- summaries.append(summary)
-
- complete_summary = ' '.join(summaries)
- return complete_summary
-
-
-################################################
-
-
-def summarize_file(file_location, file_extension, working_dir = "TranscriptApi/static/files"):
- # _, file_extension = os.path.splitext(file_location)
- text = ""
- if file_extension == 'pdf':
- text = extract_text_pdf(file_location)
- elif file_extension == 'txt':
- text = extract_text_txt(file_location)
- else:
- return "[ERROR]"
-
- if os.path.exists(working_dir):
- shutil.rmtree(working_dir)
- os.mkdir(working_dir)
- return [text, summarize_string(text)]
-
-def answer(question: str, context : str):
- # qa = pipeline(task = "question-answering", model = "Th3BossC/QuestionAnsweringModel", tokenizer = "Th3BossC/QuestionAnsweringModel")
- qa = pipeline(task = "question-answering", model = "deepset/roberta-base-squad2")
- return qa(question = question, context = context)['answer']
\ No newline at end of file
diff --git a/spaces/Tristan/static-rlhf-interface/collect.py b/spaces/Tristan/static-rlhf-interface/collect.py
deleted file mode 100644
index 3578b8dc801f5fab327d7af301557f1c5df94131..0000000000000000000000000000000000000000
--- a/spaces/Tristan/static-rlhf-interface/collect.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import argparse
-from os import path
-
-import boto3
-from boto.mturk.question import ExternalQuestion
-from config import MTURK_KEY, MTURK_SECRET
-
-parser = argparse.ArgumentParser()
-parser.add_argument("--mturk_region", default="us-east-1", help="The region for mturk (default: us-east-1)")
-parser.add_argument(
- "--space_name",
- default="Tristan/static-rlhf-interface",
- help="Name of the accompanying Hugging Face space (default: huggingface/rlhf-interface)",
-)
-parser.add_argument("--num_hits", type=int, default=5, help="The number of HITs.")
-parser.add_argument(
- "--num_assignments", type=int, default=1, help="The number of times that the HIT can be accepted and completed."
-)
-parser.add_argument(
- "--live_mode",
- action="store_true",
- help="""
- Whether to run in live mode with real turkers. This will charge your account money.
- If you don't use this flag, the HITs will be deployed on the sandbox version of mturk,
- which will not charge your account money.
- """,
-)
-parser.add_argument(
- "--refresh_qualification_test",
- action="store_true",
- help="""
- Whether to refresh the qualification test. If you've made edits to the test
- xml files, it is necessary to do this.
- """,
-)
-parser.add_argument(
- "--custom_qualification_test",
- action="store_true",
- help="""
- Whether to require the custom qualification test.
- """,
-)
-parser.add_argument(
- "--master_turkers",
- action="store_true",
- help="""
- Whether to only use turkers with the master qualification.
- """,
-)
-parser.add_argument(
- "--us_turkers",
- action="store_true",
- help="""
- Whether to only use US-based turkers.
- """,
-)
-
-args = parser.parse_args()
-
-MTURK_URL = f"https://mturk-requester{'' if args.live_mode else '-sandbox'}.{args.mturk_region}.amazonaws.com"
-
-mturk = boto3.client(
- "mturk",
- aws_access_key_id=MTURK_KEY,
- aws_secret_access_key=MTURK_SECRET,
- region_name=args.mturk_region,
- endpoint_url=MTURK_URL,
-)
-
-# This is the URL that makes the space embeddable in an mturk iframe
-question = ExternalQuestion(f"https://hf.space/embed/{args.space_name}/+?__theme=light", frame_height=600)
-
-qualification_requirements=[]
-
-if args.master_turkers:
- qualification_requirements.append({
-        "QualificationTypeId": "2F1QJWKUDD8XADTFD2Q0G6UTO95ALH",
-        "Comparator": "Exists"
- })
-
-if args.us_turkers:
- qualification_requirements.append({
-        "QualificationTypeId": "00000000000000000071",
-        "Comparator": "In",
-        "LocaleValues": [
-            {"Country": "US"},
- ]
- })
-
-if args.custom_qualification_test:
- qualification_type_id = (
- open("qualification_type_id.txt", "r").read() if path.exists("qualification_type_id.txt") else None
- )
- if args.refresh_qualification_test or qualification_type_id is None:
- if qualification_type_id is not None:
- mturk.delete_qualification_type(QualificationTypeId=qualification_type_id)
- response = mturk.create_qualification_type(
- Name="rlhf--qualification",
- Keywords="RLHF qualification",
- Description="Qualification test for RLHF task.",
- QualificationTypeStatus="Active",
- Test=open("qualification_questions.xml", mode="r").read(),
- AnswerKey=open("qualification_answers.xml", mode="r").read(),
- TestDurationInSeconds=3600,
- AutoGranted=False,
- )
- qualification_type_id = response["QualificationType"]["QualificationTypeId"]
- open("qualification_type_id.txt", "w+").write(qualification_type_id)
- qualification_requirements.append({
- "QualificationTypeId": qualification_type_id,
- "Comparator": "Exists",
- "RequiredToPreview": False,
- "ActionsGuarded": "Accept",
- })
-
-for i in range(args.num_hits):
- new_hit = mturk.create_hit(
- Title="RLHF HIT",
- Description="Interact with an AI",
- Keywords="chatbot",
- Reward="0.25",
- MaxAssignments=args.num_assignments,
- LifetimeInSeconds=172800,
- AssignmentDurationInSeconds=600,
- AutoApprovalDelayInSeconds=14400,
- Question=question.get_as_xml(),
- QualificationRequirements=qualification_requirements,
- )
-
-print(
- f"HIT Group Link: https://worker{'' if args.live_mode else 'sandbox'}.mturk.com/mturk/preview?groupId="
- + new_hit["HIT"]["HITGroupId"]
-)
diff --git a/spaces/VIOD/Real-CUGAN/README.md b/spaces/VIOD/Real-CUGAN/README.md
deleted file mode 100644
index d673114edadba73e80f33a3c71bc0dbee8758cc8..0000000000000000000000000000000000000000
--- a/spaces/VIOD/Real-CUGAN/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Real CUGAN
-emoji: 🐢
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 3.6
-app_file: app.py
-pinned: false
-license: gpl-3.0
-duplicated_from: DianXian/Real-CUGAN
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/VeryYouQ/dis-background-removal/README.md b/spaces/VeryYouQ/dis-background-removal/README.md
deleted file mode 100644
index 72b7c3f161a801956879acee54685db041448c2d..0000000000000000000000000000000000000000
--- a/spaces/VeryYouQ/dis-background-removal/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: DIS Background Removal
-emoji: 🔥 🌠 🏰
-colorFrom: yellow
-colorTo: blue
-sdk: gradio
-sdk_version: 3.1.4
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: doevent/dis-background-removal
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Wanlau/sovits-4.0_datealive/train.py b/spaces/Wanlau/sovits-4.0_datealive/train.py
deleted file mode 100644
index e499528a342c14f33eec8735d32cd3971ee6470e..0000000000000000000000000000000000000000
--- a/spaces/Wanlau/sovits-4.0_datealive/train.py
+++ /dev/null
@@ -1,310 +0,0 @@
-import logging
-import multiprocessing
-import time
-
-logging.getLogger('matplotlib').setLevel(logging.WARNING)
-import os
-import json
-import argparse
-import itertools
-import math
-import torch
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.multiprocessing as mp
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-
-import modules.commons as commons
-import utils
-from data_utils import TextAudioSpeakerLoader, TextAudioCollate
-from models import (
- SynthesizerTrn,
- MultiPeriodDiscriminator,
-)
-from modules.losses import (
- kl_loss,
- generator_loss, discriminator_loss, feature_loss
-)
-
-from modules.mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-
-torch.backends.cudnn.benchmark = True
-global_step = 0
-start_time = time.time()
-
-# os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'INFO'
-
-
-def main():
- """Assume Single Node Multi GPUs Training Only"""
- assert torch.cuda.is_available(), "CPU training is not allowed."
- hps = utils.get_hparams()
-
- n_gpus = torch.cuda.device_count()
- os.environ['MASTER_ADDR'] = 'localhost'
- os.environ['MASTER_PORT'] = hps.train.port
-
- mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
-def run(rank, n_gpus, hps):
- global global_step
- if rank == 0:
- logger = utils.get_logger(hps.model_dir)
- logger.info(hps)
- utils.check_git_hash(hps.model_dir)
- writer = SummaryWriter(log_dir=hps.model_dir)
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
-    # on Windows, fall back to the gloo backend (nccl is unavailable there)
- dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
- torch.manual_seed(hps.train.seed)
- torch.cuda.set_device(rank)
- collate_fn = TextAudioCollate()
- train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps)
- num_workers = 5 if multiprocessing.cpu_count() > 4 else multiprocessing.cpu_count()
- train_loader = DataLoader(train_dataset, num_workers=num_workers, shuffle=False, pin_memory=True,
- batch_size=hps.train.batch_size, collate_fn=collate_fn)
- if rank == 0:
- eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps)
- eval_loader = DataLoader(eval_dataset, num_workers=1, shuffle=False,
- batch_size=1, pin_memory=False,
- drop_last=False, collate_fn=collate_fn)
-
- net_g = SynthesizerTrn(
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- **hps.model).cuda(rank)
- net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
- optim_g = torch.optim.AdamW(
- net_g.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- optim_d = torch.optim.AdamW(
- net_d.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- net_g = DDP(net_g, device_ids=[rank]) # , find_unused_parameters=True)
- net_d = DDP(net_d, device_ids=[rank])
-
- skip_optimizer = False
- try:
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
- optim_g, skip_optimizer)
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
- optim_d, skip_optimizer)
- epoch_str = max(epoch_str, 1)
- global_step = (epoch_str - 1) * len(train_loader)
- except:
- print("load old checkpoint failed...")
- epoch_str = 1
- global_step = 0
- if skip_optimizer:
- epoch_str = 1
- global_step = 0
-
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
-
- scaler = GradScaler(enabled=hps.train.fp16_run)
-
- for epoch in range(epoch_str, hps.train.epochs + 1):
- if rank == 0:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
- [train_loader, eval_loader], logger, [writer, writer_eval])
- else:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
- [train_loader, None], None, None)
- scheduler_g.step()
- scheduler_d.step()
-
-
-def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
- net_g, net_d = nets
- optim_g, optim_d = optims
- scheduler_g, scheduler_d = schedulers
- train_loader, eval_loader = loaders
- if writers is not None:
- writer, writer_eval = writers
-
- # train_loader.batch_sampler.set_epoch(epoch)
- global global_step
-
- net_g.train()
- net_d.train()
- for batch_idx, items in enumerate(train_loader):
- c, f0, spec, y, spk, lengths, uv = items
- g = spk.cuda(rank, non_blocking=True)
- spec, y = spec.cuda(rank, non_blocking=True), y.cuda(rank, non_blocking=True)
- c = c.cuda(rank, non_blocking=True)
- f0 = f0.cuda(rank, non_blocking=True)
- uv = uv.cuda(rank, non_blocking=True)
- lengths = lengths.cuda(rank, non_blocking=True)
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
-
- with autocast(enabled=hps.train.fp16_run):
- y_hat, ids_slice, z_mask, \
- (z, z_p, m_p, logs_p, m_q, logs_q), pred_lf0, norm_lf0, lf0 = net_g(c, f0, uv, spec, g=g, c_lengths=lengths,
- spec_lengths=lengths)
-
- y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
- y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
-
- # Discriminator
- y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
-
- with autocast(enabled=False):
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
- loss_disc_all = loss_disc
-
- optim_d.zero_grad()
- scaler.scale(loss_disc_all).backward()
- scaler.unscale_(optim_d)
- grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
- scaler.step(optim_d)
-
- with autocast(enabled=hps.train.fp16_run):
- # Generator
- y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
- with autocast(enabled=False):
- loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
- loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
- loss_fm = feature_loss(fmap_r, fmap_g)
- loss_gen, losses_gen = generator_loss(y_d_hat_g)
- loss_lf0 = F.mse_loss(pred_lf0, lf0)
- loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl + loss_lf0
- optim_g.zero_grad()
- scaler.scale(loss_gen_all).backward()
- scaler.unscale_(optim_g)
- grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
- scaler.step(optim_g)
- scaler.update()
-
- if rank == 0:
- if global_step % hps.train.log_interval == 0:
- lr = optim_g.param_groups[0]['lr']
- losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl]
- logger.info('Train Epoch: {} [{:.0f}%]'.format(
- epoch,
- 100. * batch_idx / len(train_loader)))
- logger.info(f"Losses: {[x.item() for x in losses]}, step: {global_step}, lr: {lr}")
-
- scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr,
- "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
- scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl,
- "loss/g/lf0": loss_lf0})
-
- # scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
- # scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
- # scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
- image_dict = {
- "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
- "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
- "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
- "all/lf0": utils.plot_data_to_numpy(lf0[0, 0, :].cpu().numpy(),
- pred_lf0[0, 0, :].detach().cpu().numpy()),
- "all/norm_lf0": utils.plot_data_to_numpy(lf0[0, 0, :].cpu().numpy(),
- norm_lf0[0, 0, :].detach().cpu().numpy())
- }
-
- utils.summarize(
- writer=writer,
- global_step=global_step,
- images=image_dict,
- scalars=scalar_dict
- )
-
- if global_step % hps.train.eval_interval == 0:
- evaluate(hps, net_g, eval_loader, writer_eval)
- utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
- os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
- utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
- os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
- keep_ckpts = getattr(hps.train, 'keep_ckpts', 0)
- if keep_ckpts > 0:
- utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True)
-
- global_step += 1
-
- if rank == 0:
- global start_time
- now = time.time()
-        duration = format(now - start_time, '.2f')
-        logger.info(f'====> Epoch: {epoch}, cost {duration} s')
- start_time = now
-
-
-def evaluate(hps, generator, eval_loader, writer_eval):
- generator.eval()
- image_dict = {}
- audio_dict = {}
- with torch.no_grad():
- for batch_idx, items in enumerate(eval_loader):
- c, f0, spec, y, spk, _, uv = items
- g = spk[:1].cuda(0)
- spec, y = spec[:1].cuda(0), y[:1].cuda(0)
- c = c[:1].cuda(0)
- f0 = f0[:1].cuda(0)
- uv= uv[:1].cuda(0)
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_hat = generator.module.infer(c, f0, uv, g=g)
-
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1).float(),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
-
- audio_dict.update({
- f"gen/audio_{batch_idx}": y_hat[0],
- f"gt/audio_{batch_idx}": y[0]
- })
- image_dict.update({
- f"gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()),
- "gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())
- })
- utils.summarize(
- writer=writer_eval,
- global_step=global_step,
- images=image_dict,
- audios=audio_dict,
- audio_sampling_rate=hps.data.sampling_rate
- )
- generator.train()
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/WindVChen/INR-Harmon/model/base/basic_blocks.py b/spaces/WindVChen/INR-Harmon/model/base/basic_blocks.py
deleted file mode 100644
index cf62bec8deb6f23d23b94aaec27448adb4d03672..0000000000000000000000000000000000000000
--- a/spaces/WindVChen/INR-Harmon/model/base/basic_blocks.py
+++ /dev/null
@@ -1,366 +0,0 @@
-import torch
-from torch import nn as nn
-import numpy as np
-
-
-def hyper_weight_init(m, in_features_main_net, activation):
- if hasattr(m, 'weight'):
- nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity='relu', mode='fan_in')
- m.weight.data = m.weight.data / 1.e2
-
- if hasattr(m, 'bias'):
- with torch.no_grad():
- if activation == 'sine':
- m.bias.uniform_(-np.sqrt(6 / in_features_main_net) / 30, np.sqrt(6 / in_features_main_net) / 30)
- elif activation == 'leakyrelu_pe':
- m.bias.uniform_(-np.sqrt(6 / in_features_main_net), np.sqrt(6 / in_features_main_net))
- else:
- raise NotImplementedError
-
-
-class ConvBlock(nn.Module):
- def __init__(
- self,
- in_channels, out_channels,
- kernel_size=4, stride=2, padding=1,
- norm_layer=nn.BatchNorm2d, activation=nn.ELU,
- bias=True,
- ):
- super(ConvBlock, self).__init__()
- self.block = nn.Sequential(
- nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias),
- norm_layer(out_channels) if norm_layer is not None else nn.Identity(),
- activation(),
- )
-
- def forward(self, x):
- return self.block(x)
-
-
-class MaxPoolDownSize(nn.Module):
- def __init__(self, in_channels, mid_channels, out_channels, depth):
- super(MaxPoolDownSize, self).__init__()
- self.depth = depth
- self.reduce_conv = ConvBlock(in_channels, mid_channels, kernel_size=1, stride=1, padding=0)
- self.convs = nn.ModuleList([
- ConvBlock(mid_channels, out_channels, kernel_size=3, stride=1, padding=1)
- for conv_i in range(depth)
- ])
- self.pool2d = nn.MaxPool2d(kernel_size=2)
-
- def forward(self, x):
- outputs = []
-
- output = self.reduce_conv(x)
-
- for conv_i, conv in enumerate(self.convs):
- output = output if conv_i == 0 else self.pool2d(output)
- outputs.append(conv(output))
-
- return outputs
-
-
-class convParams(nn.Module):
- def __init__(self, input_dim, INR_in_out, opt, hidden_mlp_num, hidden_dim=512, toRGB=False):
- super(convParams, self).__init__()
- self.INR_in_out = INR_in_out
- self.cont_split_weight = []
- self.cont_split_bias = []
- self.hidden_mlp_num = hidden_mlp_num
- self.param_factorize_dim = opt.param_factorize_dim
- output_dim = self.cal_params_num(INR_in_out, hidden_mlp_num, toRGB)
- self.output_dim = output_dim
- self.toRGB = toRGB
- self.cont_extraction_net = nn.Sequential(
- nn.Conv2d(input_dim, hidden_dim, kernel_size=3, stride=2, padding=1, bias=False),
- # nn.BatchNorm2d(hidden_dim),
- nn.ReLU(inplace=True),
- nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False),
- # nn.BatchNorm2d(hidden_dim),
- nn.ReLU(inplace=True),
- nn.Conv2d(hidden_dim, output_dim, kernel_size=1, stride=1, padding=0, bias=True),
- )
-
- self.cont_extraction_net[-1].apply(lambda m: hyper_weight_init(m, INR_in_out[0], opt.activation))
-
- self.basic_params = nn.ParameterList()
- if opt.param_factorize_dim > 0:
- for id in range(self.hidden_mlp_num + 1):
- if id == 0:
- inp, outp = self.INR_in_out[0], self.INR_in_out[1]
- else:
- inp, outp = self.INR_in_out[1], self.INR_in_out[1]
- self.basic_params.append(nn.Parameter(torch.randn(1, 1, 1, inp, outp)))
-
- if toRGB:
- self.basic_params.append(nn.Parameter(torch.randn(1, 1, 1, self.INR_in_out[1], 3)))
-
- def forward(self, feat, outMore=False):
- cont_params = self.cont_extraction_net(feat)
- out_mlp = self.to_mlp(cont_params)
- if outMore:
- return out_mlp, cont_params
- return out_mlp
-
- def cal_params_num(self, INR_in_out, hidden_mlp_num, toRGB=False):
- cont_params = 0
- start = 0
- if self.param_factorize_dim == -1:
- cont_params += INR_in_out[0] * INR_in_out[1] + INR_in_out[1]
- self.cont_split_weight.append([start, cont_params - INR_in_out[1]])
- self.cont_split_bias.append([cont_params - INR_in_out[1], cont_params])
- start = cont_params
-
- for id in range(hidden_mlp_num):
- cont_params += INR_in_out[1] * INR_in_out[1] + INR_in_out[1]
- self.cont_split_weight.append([start, cont_params - INR_in_out[1]])
- self.cont_split_bias.append([cont_params - INR_in_out[1], cont_params])
- start = cont_params
-
- if toRGB:
- cont_params += INR_in_out[1] * 3 + 3
- self.cont_split_weight.append([start, cont_params - 3])
- self.cont_split_bias.append([cont_params - 3, cont_params])
-
- elif self.param_factorize_dim > 0:
- cont_params += INR_in_out[0] * self.param_factorize_dim + self.param_factorize_dim * INR_in_out[1] + \
- INR_in_out[1]
- self.cont_split_weight.append(
- [start, start + INR_in_out[0] * self.param_factorize_dim, cont_params - INR_in_out[1]])
- self.cont_split_bias.append([cont_params - INR_in_out[1], cont_params])
- start = cont_params
-
- for id in range(hidden_mlp_num):
- cont_params += INR_in_out[1] * self.param_factorize_dim + self.param_factorize_dim * INR_in_out[1] + \
- INR_in_out[1]
- self.cont_split_weight.append(
- [start, start + INR_in_out[1] * self.param_factorize_dim, cont_params - INR_in_out[1]])
- self.cont_split_bias.append([cont_params - INR_in_out[1], cont_params])
- start = cont_params
-
- if toRGB:
- cont_params += INR_in_out[1] * self.param_factorize_dim + self.param_factorize_dim * 3 + 3
- self.cont_split_weight.append(
- [start, start + INR_in_out[1] * self.param_factorize_dim, cont_params - 3])
- self.cont_split_bias.append([cont_params - 3, cont_params])
-
- return cont_params
-
- def to_mlp(self, params):
- all_weight_bias = []
- if self.param_factorize_dim == -1:
- for id in range(self.hidden_mlp_num + 1):
- if id == 0:
- inp, outp = self.INR_in_out[0], self.INR_in_out[1]
- else:
- inp, outp = self.INR_in_out[1], self.INR_in_out[1]
- weight = params[:, self.cont_split_weight[id][0]:self.cont_split_weight[id][1], :, :]
- weight = weight.permute(0, 2, 3, 1).contiguous().view(weight.shape[0], *weight.shape[2:],
- inp, outp)
-
- bias = params[:, self.cont_split_bias[id][0]:self.cont_split_bias[id][1], :, :]
- bias = bias.permute(0, 2, 3, 1).contiguous().view(bias.shape[0], *bias.shape[2:], 1, outp)
- all_weight_bias.append([weight, bias])
-
- if self.toRGB:
- inp, outp = self.INR_in_out[1], 3
- weight = params[:, self.cont_split_weight[-1][0]:self.cont_split_weight[-1][1], :, :]
- weight = weight.permute(0, 2, 3, 1).contiguous().view(weight.shape[0], *weight.shape[2:],
- inp, outp)
-
- bias = params[:, self.cont_split_bias[-1][0]:self.cont_split_bias[-1][1], :, :]
- bias = bias.permute(0, 2, 3, 1).contiguous().view(bias.shape[0], *bias.shape[2:], 1, outp)
- all_weight_bias.append([weight, bias])
-
- return all_weight_bias
-
- else:
- for id in range(self.hidden_mlp_num + 1):
- if id == 0:
- inp, outp = self.INR_in_out[0], self.INR_in_out[1]
- else:
- inp, outp = self.INR_in_out[1], self.INR_in_out[1]
- weight1 = params[:, self.cont_split_weight[id][0]:self.cont_split_weight[id][1], :, :]
- weight1 = weight1.permute(0, 2, 3, 1).contiguous().view(weight1.shape[0], *weight1.shape[2:],
- inp, self.param_factorize_dim)
-
- weight2 = params[:, self.cont_split_weight[id][1]:self.cont_split_weight[id][2], :, :]
- weight2 = weight2.permute(0, 2, 3, 1).contiguous().view(weight2.shape[0], *weight2.shape[2:],
- self.param_factorize_dim, outp)
-
- bias = params[:, self.cont_split_bias[id][0]:self.cont_split_bias[id][1], :, :]
- bias = bias.permute(0, 2, 3, 1).contiguous().view(bias.shape[0], *bias.shape[2:], 1, outp)
-
- all_weight_bias.append([torch.tanh(torch.matmul(weight1, weight2)) * self.basic_params[id], bias])
-
- if self.toRGB:
- inp, outp = self.INR_in_out[1], 3
- weight1 = params[:, self.cont_split_weight[-1][0]:self.cont_split_weight[-1][1], :, :]
- weight1 = weight1.permute(0, 2, 3, 1).contiguous().view(weight1.shape[0], *weight1.shape[2:],
- inp, self.param_factorize_dim)
-
- weight2 = params[:, self.cont_split_weight[-1][1]:self.cont_split_weight[-1][2], :, :]
- weight2 = weight2.permute(0, 2, 3, 1).contiguous().view(weight2.shape[0], *weight2.shape[2:],
- self.param_factorize_dim, outp)
-
- bias = params[:, self.cont_split_bias[-1][0]:self.cont_split_bias[-1][1], :, :]
- bias = bias.permute(0, 2, 3, 1).contiguous().view(bias.shape[0], *bias.shape[2:], 1, outp)
-
- all_weight_bias.append([torch.tanh(torch.matmul(weight1, weight2)) * self.basic_params[-1], bias])
-
- return all_weight_bias
-
-
-class lineParams(nn.Module):
- def __init__(self, input_dim, INR_in_out, input_resolution, opt, hidden_mlp_num, toRGB=False,
- hidden_dim=512):
- super(lineParams, self).__init__()
- self.INR_in_out = INR_in_out
- self.app_split_weight = []
- self.app_split_bias = []
- self.toRGB = toRGB
- self.hidden_mlp_num = hidden_mlp_num
- self.param_factorize_dim = opt.param_factorize_dim
- output_dim = self.cal_params_num(INR_in_out, hidden_mlp_num)
- self.output_dim = output_dim
-
- self.compress_layer = nn.Sequential(
- nn.Linear(input_resolution, 64, bias=False),
- nn.BatchNorm1d(input_dim),
- nn.ReLU(inplace=True),
- nn.Linear(64, 1, bias=True)
- )
-
- self.app_extraction_net = nn.Sequential(
- nn.Linear(input_dim, hidden_dim, bias=False),
- # nn.BatchNorm1d(hidden_dim),
- nn.ReLU(inplace=True),
- nn.Linear(hidden_dim, hidden_dim, bias=False),
- # nn.BatchNorm1d(hidden_dim),
- nn.ReLU(inplace=True),
- nn.Linear(hidden_dim, output_dim, bias=True)
- )
-
- self.app_extraction_net[-1].apply(lambda m: hyper_weight_init(m, INR_in_out[0], opt.activation))
-
- self.basic_params = nn.ParameterList()
- if opt.param_factorize_dim > 0:
- for id in range(self.hidden_mlp_num + 1):
- if id == 0:
- inp, outp = self.INR_in_out[0], self.INR_in_out[1]
- else:
- inp, outp = self.INR_in_out[1], self.INR_in_out[1]
- self.basic_params.append(nn.Parameter(torch.randn(1, inp, outp)))
- if toRGB:
- self.basic_params.append(nn.Parameter(torch.randn(1, self.INR_in_out[1], 3)))
-
- def forward(self, feat):
- app_params = self.app_extraction_net(self.compress_layer(torch.flatten(feat, 2)).squeeze(-1))
- out_mlp = self.to_mlp(app_params)
- return out_mlp, app_params
-
- def cal_params_num(self, INR_in_out, hidden_mlp_num):
- app_params = 0
- start = 0
- if self.param_factorize_dim == -1:
- app_params += INR_in_out[0] * INR_in_out[1] + INR_in_out[1]
- self.app_split_weight.append([start, app_params - INR_in_out[1]])
- self.app_split_bias.append([app_params - INR_in_out[1], app_params])
- start = app_params
-
- for id in range(hidden_mlp_num):
- app_params += INR_in_out[1] * INR_in_out[1] + INR_in_out[1]
- self.app_split_weight.append([start, app_params - INR_in_out[1]])
- self.app_split_bias.append([app_params - INR_in_out[1], app_params])
- start = app_params
-
- if self.toRGB:
- app_params += INR_in_out[1] * 3 + 3
- self.app_split_weight.append([start, app_params - 3])
- self.app_split_bias.append([app_params - 3, app_params])
-
- elif self.param_factorize_dim > 0:
- app_params += INR_in_out[0] * self.param_factorize_dim + self.param_factorize_dim * INR_in_out[1] + \
- INR_in_out[1]
- self.app_split_weight.append([start, start + INR_in_out[0] * self.param_factorize_dim,
- app_params - INR_in_out[1]])
- self.app_split_bias.append([app_params - INR_in_out[1], app_params])
- start = app_params
-
- for id in range(hidden_mlp_num):
- app_params += INR_in_out[1] * self.param_factorize_dim + self.param_factorize_dim * INR_in_out[1] + \
- INR_in_out[1]
- self.app_split_weight.append(
- [start, start + INR_in_out[1] * self.param_factorize_dim, app_params - INR_in_out[1]])
- self.app_split_bias.append([app_params - INR_in_out[1], app_params])
- start = app_params
-
- if self.toRGB:
- app_params += INR_in_out[1] * self.param_factorize_dim + self.param_factorize_dim * 3 + 3
- self.app_split_weight.append([start, start + INR_in_out[1] * self.param_factorize_dim,
- app_params - 3])
- self.app_split_bias.append([app_params - 3, app_params])
-
- return app_params
-
- def to_mlp(self, params):
- all_weight_bias = []
- if self.param_factorize_dim == -1:
- for id in range(self.hidden_mlp_num + 1):
- if id == 0:
- inp, outp = self.INR_in_out[0], self.INR_in_out[1]
- else:
- inp, outp = self.INR_in_out[1], self.INR_in_out[1]
- weight = params[:, self.app_split_weight[id][0]:self.app_split_weight[id][1]]
- weight = weight.view(weight.shape[0], inp, outp)
-
- bias = params[:, self.app_split_bias[id][0]:self.app_split_bias[id][1]]
- bias = bias.view(bias.shape[0], 1, outp)
-
- all_weight_bias.append([weight, bias])
-
- if self.toRGB:
- id = -1
- inp, outp = self.INR_in_out[1], 3
- weight = params[:, self.app_split_weight[id][0]:self.app_split_weight[id][1]]
- weight = weight.view(weight.shape[0], inp, outp)
-
- bias = params[:, self.app_split_bias[id][0]:self.app_split_bias[id][1]]
- bias = bias.view(bias.shape[0], 1, outp)
-
- all_weight_bias.append([weight, bias])
-
- return all_weight_bias
-
- else:
- for id in range(self.hidden_mlp_num + 1):
- if id == 0:
- inp, outp = self.INR_in_out[0], self.INR_in_out[1]
- else:
- inp, outp = self.INR_in_out[1], self.INR_in_out[1]
- weight1 = params[:, self.app_split_weight[id][0]:self.app_split_weight[id][1]]
- weight1 = weight1.view(weight1.shape[0], inp, self.param_factorize_dim)
-
- weight2 = params[:, self.app_split_weight[id][1]:self.app_split_weight[id][2]]
- weight2 = weight2.view(weight2.shape[0], self.param_factorize_dim, outp)
-
- bias = params[:, self.app_split_bias[id][0]:self.app_split_bias[id][1]]
- bias = bias.view(bias.shape[0], 1, outp)
-
- all_weight_bias.append([torch.tanh(torch.matmul(weight1, weight2)) * self.basic_params[id], bias])
-
- if self.toRGB:
- id = -1
- inp, outp = self.INR_in_out[1], 3
- weight1 = params[:, self.app_split_weight[id][0]:self.app_split_weight[id][1]]
- weight1 = weight1.view(weight1.shape[0], inp, self.param_factorize_dim)
-
- weight2 = params[:, self.app_split_weight[id][1]:self.app_split_weight[id][2]]
- weight2 = weight2.view(weight2.shape[0], self.param_factorize_dim, outp)
-
- bias = params[:, self.app_split_bias[id][0]:self.app_split_bias[id][1]]
- bias = bias.view(bias.shape[0], 1, outp)
-
- all_weight_bias.append([torch.tanh(torch.matmul(weight1, weight2)) * self.basic_params[id], bias])
-
- return all_weight_bias
diff --git a/spaces/Xeraphinite/Coursera-GPT/README.md b/spaces/Xeraphinite/Coursera-GPT/README.md
deleted file mode 100644
index fa3158b4ecbbf39a9742caed602ab732c6f7944a..0000000000000000000000000000000000000000
--- a/spaces/Xeraphinite/Coursera-GPT/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-license: openrail
-title: Coursera-GPT
-sdk: gradio
-emoji: 📚
-colorFrom: blue
-colorTo: indigo
-pinned: true
----
-# Coursera-GPT
-
-### 🚧 Under Construction 🚧
-
-Helps you write a financial aid application to apply for a course on Coursera.
-
-The application is drafted based on your personal circumstances.
-
-
-### CHANGELOG
-
-
-### TODO
-1. (⭐⭐⭐) Switch from a pure OpenAI GPT model to a Hugging Face based model selection; having more choices never hurts
-2. (⭐⭐⭐) Improve the prompt; the current prompt is still not very well designed
-3. (⭐⭐) Write an English README.md
-4. (⭐⭐) Write the Change Log
-5. (⭐) Rewrite the Gradio front end and improve the user interface
-6. (⭐) Turn the default params into Examples
-
-### Analysis
-
-#### Step 1
-
-- **Education**: education level; one of High School, Some College, College Degree, Master's/Advanced degree, Other
-- **Annual Income**: income (of course, if you have to apply for Financial Aid this is probably 0)
-- **Employment Status**: employment situation; one of Full Time, Part Time, Unemployed, Student, Other
-- **Reason you applied for aid**: the reason you are applying for Financial Aid, i.e. based on
-
-#### Step 2
-
-- ****
-- ****
\ No newline at end of file
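
The deleted Coursera-GPT README above only lists the Step 1 form fields; as a purely illustrative aid, the fields could be assembled into a generation prompt roughly as sketched below. The space's actual `app.py` is not part of this diff, so the function name and prompt wording are assumptions, not the app's real code.

```py
# Illustrative sketch only: the Coursera-GPT app.py is not shown in this diff, so the
# function name and prompt wording below are assumptions rather than the space's real code.

def build_aid_prompt(education: str, annual_income: str, employment: str, reason: str) -> str:
    """Combine the Step 1 form fields into a single instruction for a text-generation model."""
    return (
        "Write a sincere Coursera financial aid application (about 300 words) for an applicant with:\n"
        f"- Education: {education}\n"
        f"- Annual income: {annual_income}\n"
        f"- Employment status: {employment}\n"
        f"- Reason for applying: {reason}\n"
    )

if __name__ == "__main__":
    print(build_aid_prompt("Some College", "0", "Student", "I cannot afford the course fee."))
```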
diff --git a/spaces/XzJosh/ShanBao-Bert-VITS2/app.py b/spaces/XzJosh/ShanBao-Bert-VITS2/app.py
deleted file mode 100644
index f2658484e4fd9444f7e4aaf302799f2309530a98..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/ShanBao-Bert-VITS2/app.py
+++ /dev/null
@@ -1,157 +0,0 @@
-import sys, os
-
-if sys.platform == "darwin":
- os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
-
-import logging
-
-logging.getLogger("numba").setLevel(logging.WARNING)
-logging.getLogger("markdown_it").setLevel(logging.WARNING)
-logging.getLogger("urllib3").setLevel(logging.WARNING)
-logging.getLogger("matplotlib").setLevel(logging.WARNING)
-
-logging.basicConfig(level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s")
-
-logger = logging.getLogger(__name__)
-
-import torch
-import argparse
-import commons
-import utils
-from models import SynthesizerTrn
-from text.symbols import symbols
-from text import cleaned_text_to_sequence, get_bert
-from text.cleaner import clean_text
-import gradio as gr
-import webbrowser
-
-
-net_g = None
-
-
-def get_text(text, language_str, hps):
- norm_text, phone, tone, word2ph = clean_text(text, language_str)
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-
- if hps.data.add_blank:
- phone = commons.intersperse(phone, 0)
- tone = commons.intersperse(tone, 0)
- language = commons.intersperse(language, 0)
- for i in range(len(word2ph)):
- word2ph[i] = word2ph[i] * 2
- word2ph[0] += 1
- bert = get_bert(norm_text, word2ph, language_str)
- del word2ph
-
- assert bert.shape[-1] == len(phone)
-
- phone = torch.LongTensor(phone)
- tone = torch.LongTensor(tone)
- language = torch.LongTensor(language)
-
- return bert, phone, tone, language
-
-def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid):
- global net_g
- bert, phones, tones, lang_ids = get_text(text, "ZH", hps)
- with torch.no_grad():
- x_tst=phones.to(device).unsqueeze(0)
- tones=tones.to(device).unsqueeze(0)
- lang_ids=lang_ids.to(device).unsqueeze(0)
- bert = bert.to(device).unsqueeze(0)
- x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
- del phones
- speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
- audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio
- , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy()
- del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
- return audio
-
-def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale):
- with torch.no_grad():
- audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker)
- return "Success", (hps.data.sampling_rate, audio)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--model_dir", default="./logs/ShanBao/G_4000.pth", help="path of your model")
- parser.add_argument("--config_dir", default="./configs/config.json", help="path of your config file")
- parser.add_argument("--share", default=False, help="make link public")
- parser.add_argument("-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log")
-
- args = parser.parse_args()
- if args.debug:
- logger.info("Enable DEBUG-LEVEL log")
- logging.basicConfig(level=logging.DEBUG)
- hps = utils.get_hparams_from_file(args.config_dir)
- device = "cuda:0" if torch.cuda.is_available() else "cpu"
- '''
- device = (
- "cuda:0"
- if torch.cuda.is_available()
- else (
- "mps"
- if sys.platform == "darwin" and torch.backends.mps.is_available()
- else "cpu"
- )
- )
- '''
- net_g = SynthesizerTrn(
- len(symbols),
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- n_speakers=hps.data.n_speakers,
- **hps.model).to(device)
- _ = net_g.eval()
-
- _ = utils.load_checkpoint(args.model_dir, net_g, None, skip_optimizer=True)
-
- speaker_ids = hps.data.spk2id
- speakers = list(speaker_ids.keys())
- with gr.Blocks() as app:
- with gr.Row():
- with gr.Column():
- gr.Markdown(value="""
- 【AI扇宝】在线语音合成(Bert-Vits2)\n
- 作者:Xz乔希 https://space.bilibili.com/5859321\n
- 声音归属:扇宝 https://space.bilibili.com/698438232\n
- Bert-VITS2项目:https://github.com/Stardust-minus/Bert-VITS2\n
- 使用本模型请严格遵守法律法规!\n
- 发布二创作品请标注本项目作者及链接、作品使用Bert-VITS2 AI生成!\n
- """)
- text = gr.TextArea(label="Text", placeholder="Input Text Here",
- value="大家好啊,我是扇宝")
- speaker = gr.Dropdown(choices=speakers, value=speakers[0], label='Speaker')
- sdp_ratio = gr.Slider(minimum=0.1, maximum=1, value=0.2, step=0.01, label='SDP/DP混合比')
- noise_scale = gr.Slider(minimum=0.1, maximum=1, value=0.5, step=0.01, label='感情调节')
- noise_scale_w = gr.Slider(minimum=0.1, maximum=1, value=0.9, step=0.01, label='音素长度')
- length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.01, label='生成长度')
- btn = gr.Button("点击生成", variant="primary")
- with gr.Column():
- text_output = gr.Textbox(label="Message")
- audio_output = gr.Audio(label="Output Audio")
- gr.Markdown(value="""
- 【AI星瞳】https://huggingface.co/spaces/XzJosh/XingTong-Bert-VITS2\n
- 【AI尼奈】https://huggingface.co/spaces/XzJosh/nine1-Bert-VITS2\n
- 【AI剑魔】https://huggingface.co/spaces/XzJosh/Aatrox-Bert-VITS2\n
- 【AI塔菲】https://huggingface.co/spaces/XzJosh/Taffy-Bert-VITS2\n
- 【AI东雪莲】https://huggingface.co/spaces/XzJosh/Azuma-Bert-VITS2\n
- 【AI奶绿】https://huggingface.co/spaces/XzJosh/LAPLACE-Bert-VITS2\n
- 【AI七海】https://huggingface.co/spaces/XzJosh/Nana7mi-Bert-VITS2\n
- 【AI阿梓】https://huggingface.co/spaces/XzJosh/Azusa-Bert-VITS2\n
- 【AI嘉然】https://huggingface.co/spaces/XzJosh/Diana-Bert-VITS2\n
- 【AI向晚】https://huggingface.co/spaces/XzJosh/Ava-Bert-VITS2\n
- 【AI乃琳】https://huggingface.co/spaces/XzJosh/Eileen-Bert-VITS2\n
- 【AI贝拉】https://huggingface.co/spaces/XzJosh/Bella-Bert-VITS2\n
- 【AI珈乐】https://huggingface.co/spaces/XzJosh/Carol-Bert-VITS2\n
- 【AI电棍】https://huggingface.co/spaces/XzJosh/otto-Bert-VITS2\n
- """)
- btn.click(tts_fn,
- inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale],
- outputs=[text_output, audio_output])
-
-# webbrowser.open("http://127.0.0.1:6006")
-# app.launch(server_port=6006, show_error=True)
-
- app.launch(show_error=True)
diff --git a/spaces/XzJosh/XingTong-Bert-VITS2/monotonic_align/__init__.py b/spaces/XzJosh/XingTong-Bert-VITS2/monotonic_align/__init__.py
deleted file mode 100644
index 75603d26cf2b8d6196f5a68a89f9e49d8e519bc8..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/XingTong-Bert-VITS2/monotonic_align/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from numpy import zeros, int32, float32
-from torch import from_numpy
-
-from .core import maximum_path_jit
-
-def maximum_path(neg_cent, mask):
- device = neg_cent.device
- dtype = neg_cent.dtype
- neg_cent = neg_cent.data.cpu().numpy().astype(float32)
- path = zeros(neg_cent.shape, dtype=int32)
-
- t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
- t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
- maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
- return from_numpy(path).to(device=device, dtype=dtype)
diff --git a/spaces/YouLiXiya/Mobile-SAM/segment_anything/segment_anything/mobile_encoder/__init__.py b/spaces/YouLiXiya/Mobile-SAM/segment_anything/segment_anything/mobile_encoder/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Yukki-Yui/moe-tts/commons.py b/spaces/Yukki-Yui/moe-tts/commons.py
deleted file mode 100644
index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000
--- a/spaces/Yukki-Yui/moe-tts/commons.py
+++ /dev/null
@@ -1,172 +0,0 @@
-import math
-import torch
-from torch.nn import functional as F
-import torch.jit
-
-
-def script_method(fn, _rcb=None):
- return fn
-
-
-def script(obj, optimize=True, _frames_up=0, _rcb=None):
- return obj
-
-
-torch.jit.script_method = script_method
-torch.jit.script = script
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size*dilation - dilation)/2)
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def intersperse(lst, item):
- result = [item] * (len(lst) * 2 + 1)
- result[1::2] = lst
- return result
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
- """KL(P||Q)"""
- kl = (logs_q - logs_p) - 0.5
- kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
- return kl
-
-
-def rand_gumbel(shape):
- """Sample from the Gumbel distribution, protect from overflows."""
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
- return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
- return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, :, idx_str:idx_end]
- return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
-
-
-def get_timing_signal_1d(
- length, channels, min_timescale=1.0, max_timescale=1.0e4):
- position = torch.arange(length, dtype=torch.float)
- num_timescales = channels // 2
- log_timescale_increment = (
- math.log(float(max_timescale) / float(min_timescale)) /
- (num_timescales - 1))
- inv_timescales = min_timescale * torch.exp(
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
- signal = F.pad(signal, [0, 0, 0, channels % 2])
- signal = signal.view(1, channels, length)
- return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
- return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def shift_1d(x):
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
- return x
-
-
-def sequence_mask(length, max_length=None):
- if max_length is None:
- max_length = length.max()
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
- return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
- """
- duration: [b, 1, t_x]
- mask: [b, 1, t_y, t_x]
- """
- device = duration.device
-
- b, _, t_y, t_x = mask.shape
- cum_duration = torch.cumsum(duration, -1)
-
- cum_duration_flat = cum_duration.view(b * t_x)
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
- path = path.view(b, t_x, t_y)
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
- path = path.unsqueeze(1).transpose(2,3) * mask
- return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
- if isinstance(parameters, torch.Tensor):
- parameters = [parameters]
- parameters = list(filter(lambda p: p.grad is not None, parameters))
- norm_type = float(norm_type)
- if clip_value is not None:
- clip_value = float(clip_value)
-
- total_norm = 0
- for p in parameters:
- param_norm = p.grad.data.norm(norm_type)
- total_norm += param_norm.item() ** norm_type
- if clip_value is not None:
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
- total_norm = total_norm ** (1. / norm_type)
- return total_norm
diff --git a/spaces/Yusin/talking-stable-diffusion/app.py b/spaces/Yusin/talking-stable-diffusion/app.py
deleted file mode 100644
index f55f9b4eb3adb55ff7354e479f8578e9964c6dee..0000000000000000000000000000000000000000
--- a/spaces/Yusin/talking-stable-diffusion/app.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import gradio as gr
-from PIL import Image
-import os
-
-#from diffusers import StableDiffusionPipeline
-whisper = gr.Interface.load(name="spaces/sanchit-gandhi/whisper-large-v2")
-#stable_diffusion = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion")
-stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
-### ————————————————————————————————————————
-title="Talking to Stable Diffusion"
-### ————————————————————————————————————————
-def get_images(prompt):
- #gallery_dir = stable_diffusion(prompt, None, None, fn_index=2)
- gallery_dir = stable_diffusion(prompt, fn_index=2)
- return [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]
-
-
-def translate_better(audio):
- print("""
- —
- Sending audio to Whisper ...
- —
- """)
- transcribe_text_result = whisper(audio, None, "transcribe", fn_index=0)
- translate_text_result = whisper(audio, None, "translate", fn_index=0)
- print("transcript: " + transcribe_text_result)
- print("———————————————————————————————————————————")
- print("translated: " + translate_text_result)
-
- return transcribe_text_result, translate_text_result
-
-
-
-with gr.Blocks() as demo:
- gr.Markdown(
- """
- ## 1. Say what you want:
- """
- )
- with gr.Column():
- with gr.Tab(label="Record audio input", elem_id="record_tab"):
- with gr.Column():
- record_input = gr.Audio(
- source="microphone",
- type="filepath",
- show_label=False,
- elem_id="record_btn"
- )
- with gr.Row():
- audio_r_translate = gr.Button("Check Whisper first", elem_id="check_btn_1")
-                audio_r_direct_sd = gr.Button("Generate Images", elem_id="magic_btn_1")
-
- with gr.Accordion(label="Stable Diffusion Settings", elem_id="sd_settings", visible=False):
- with gr.Row():
- guidance_scale = gr.Slider(2, 15, value = 7, label = 'Guidance Scale')
- nb_iterations = gr.Slider(10, 50, value = 25, step = 1, label = 'Steps')
- seed = gr.Slider(label = "Seed", minimum = 0, maximum = 2147483647, step = 1, randomize = True)
-
- gr.Markdown(
- """
- ## 2. Check Whisper output:
- """
- )
-
- with gr.Row():
- transcripted_output = gr.Textbox(
- label="Transcription in your detected spoken language",
- lines=3,
- elem_id="transcripted"
- )
- translated_output = gr.Textbox(
-                label="Transcription translated to English",
- lines=3,
- elem_id="translated"
- )
-
-
-
- gr.Markdown("""
-    ## 3. Wait ~10 seconds for the Stable Diffusion results
- """
- )
-
- sd_output = gr.Gallery().style(grid=2, height="auto")
- audio_r_translate.click(translate_better,
- inputs = [
- record_input
- ],
- outputs = [
- transcripted_output,
- translated_output,
- ])
- audio_r_direct_sd.click(get_images,
- inputs = [
- translated_output
- ],
- outputs = sd_output
- )
-
-if __name__ == "__main__":
- demo.queue(max_size=32, concurrency_count=20).launch()
\ No newline at end of file
diff --git a/spaces/Zenne/chatbot_for_files_langchain/README.md b/spaces/Zenne/chatbot_for_files_langchain/README.md
deleted file mode 100644
index 10b17d86249f0a40591c42860aaf795ca5840073..0000000000000000000000000000000000000000
--- a/spaces/Zenne/chatbot_for_files_langchain/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-title: Chatbot For Files Langchain
-emoji: ⚡
-colorFrom: yellow
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.19.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
-This is a chatbot that uses Langchain's Conversational Retrieval Chain to generate responses to user input. The chatbot can ingest files and use either a Pinecone vector store (Pinecone API key required) or a Chroma vector store (no API key required) to retrieve relevant documents when generating responses. An OpenAI API key is also required. The UI is built with Streamlit.
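-
-For illustration, a minimal sketch of how such a chain can be wired up with LangChain and a Chroma vector store is shown below; the model name, collection name, and `persist_directory` value are placeholders for this sketch, not the exact settings used by this app:
-
-```
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.vectorstores import Chroma
-from langchain.chains import ConversationalRetrievalChain
-
-# Placeholder settings: the collection name, persist directory, and model name are illustrative.
-embeddings = OpenAIEmbeddings()
-vectorstore = Chroma(collection_name="my_docs",
-                     embedding_function=embeddings,
-                     persist_directory="./persist")
-
-# Retrieval-augmented chat chain: the retriever fetches relevant chunks, the LLM answers.
-chain = ConversationalRetrievalChain.from_llm(
-    llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
-    retriever=vectorstore.as_retriever(),
-)
-
-chat_history = []
-result = chain({"question": "What is this document about?", "chat_history": chat_history})
-print(result["answer"])
-```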
-
-## Fun fact
-This README file is generated by this app after ingesting this Python file. See the screenshot below.
-
-## Installation
-
-To install the required packages, run:
-
-```
-pip install -r requirements.txt
-```
-
-## Usage
-
-To run the chatbot, run:
-
-```
-streamlit run app.py
-```
-
-The chatbot will prompt the user for inputs and generate a response based on the user's question and the chat history.
-
-## Ingesting Files
-
-To ingest files, select "Yes" when prompted and upload the files. The chatbot will split the files into smaller documents and ingest them into the vector store.
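-
-A rough sketch of this splitting and ingestion step is shown below, assuming a plain-text upload; the file name, chunk sizes, collection name, and persist directory are illustrative placeholders rather than the app's exact settings:
-
-```
-from langchain.document_loaders import TextLoader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.vectorstores import Chroma
-
-# Placeholder values: the uploaded file path, chunk settings, and persist directory are illustrative.
-docs = TextLoader("uploaded_file.txt").load()
-splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
-chunks = splitter.split_documents(docs)
-
-# Embed the chunks and store them in a persistent Chroma collection.
-Chroma.from_documents(chunks, OpenAIEmbeddings(),
-                      collection_name="my_docs",
-                      persist_directory="./persist")
-```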
-
-## Using Pinecone
-
-To use Pinecone, select "Yes" when prompted and enter the name of the Pinecone index. Make sure to set the `PINECONE_API_KEY` and `PINECONE_API_ENV` environment variables.
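-
-For reference, connecting to an existing Pinecone index might look roughly like the sketch below; the index name is a placeholder, and the exact client calls may differ between pinecone-client versions:
-
-```
-import os
-
-import pinecone
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.vectorstores import Pinecone
-
-# Placeholder index name; the API key and environment come from the environment variables above.
-pinecone.init(api_key=os.environ["PINECONE_API_KEY"],
-              environment=os.environ["PINECONE_API_ENV"])
-vectorstore = Pinecone.from_existing_index("my-index", OpenAIEmbeddings())
-retriever = vectorstore.as_retriever()
-```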
-
-## Using Chroma
-
-To use Chroma, enter the name of the Chroma collection when prompted. The chatbot will create a Chroma vector store in the `persist_directory` specified in the code.
-
-
-## Screenshot
-Using Pinecone without ingesting files:
-
-
-
-Using Chroma and ingesting files:
-
-
diff --git a/spaces/aadnk/faster-whisper-webui/src/whisper/abstractWhisperContainer.py b/spaces/aadnk/faster-whisper-webui/src/whisper/abstractWhisperContainer.py
deleted file mode 100644
index 98cae0679185e2142f3cd3c7bdf35ab67640d5b2..0000000000000000000000000000000000000000
--- a/spaces/aadnk/faster-whisper-webui/src/whisper/abstractWhisperContainer.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import abc
-from typing import Any, Callable, List
-
-from src.config import ModelConfig, VadInitialPromptMode
-
-from src.hooks.progressListener import ProgressListener
-from src.modelCache import GLOBAL_MODEL_CACHE, ModelCache
-from src.prompts.abstractPromptStrategy import AbstractPromptStrategy
-
-class AbstractWhisperCallback:
- def __init__(self):
- pass
-
- @abc.abstractmethod
- def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None):
- """
-        Perform the transcription of the given audio file or data.
-
- Parameters
- ----------
- audio: Union[str, np.ndarray, torch.Tensor]
- The audio file to transcribe, or the audio data as a numpy array or torch tensor.
-        segment_index: int
-            The index of the audio segment being transcribed.
-        prompt: str
-            The prompt to condition the transcription on, if any.
-        detected_language: str
-            The detected language of the audio, if known.
- progress_listener: ProgressListener
- A callback to receive progress updates.
- """
- raise NotImplementedError()
-
-class LambdaWhisperCallback(AbstractWhisperCallback):
- def __init__(self, callback_lambda: Callable[[Any, int, str, str, ProgressListener], None]):
- super().__init__()
- self.callback_lambda = callback_lambda
-
- def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None):
- return self.callback_lambda(audio, segment_index, prompt, detected_language, progress_listener)
-
-class AbstractWhisperContainer:
- def __init__(self, model_name: str, device: str = None, compute_type: str = "float16",
- download_root: str = None,
- cache: ModelCache = None, models: List[ModelConfig] = []):
- self.model_name = model_name
- self.device = device
- self.compute_type = compute_type
- self.download_root = download_root
- self.cache = cache
-
- # Will be created on demand
- self.model = None
-
- # List of known models
- self.models = models
-
- def get_model(self):
- if self.model is None:
-
- if (self.cache is None):
- self.model = self._create_model()
- else:
- model_key = "WhisperContainer." + self.model_name + ":" + (self.device if self.device else '')
- self.model = self.cache.get(model_key, self._create_model)
- return self.model
-
- @abc.abstractmethod
- def _create_model(self):
- raise NotImplementedError()
-
- def ensure_downloaded(self):
- pass
-
- @abc.abstractmethod
- def create_callback(self, language: str = None, task: str = None,
- prompt_strategy: AbstractPromptStrategy = None,
- **decodeOptions: dict) -> AbstractWhisperCallback:
- """
-        Create a WhisperCallback object that can be used to transcribe audio files.
-
- Parameters
- ----------
- language: str
- The target language of the transcription. If not specified, the language will be inferred from the audio content.
- task: str
- The task - either translate or transcribe.
- prompt_strategy: AbstractPromptStrategy
- The prompt strategy to use for the transcription.
- decodeOptions: dict
- Additional options to pass to the decoder. Must be pickleable.
-
- Returns
- -------
- A WhisperCallback object.
- """
- raise NotImplementedError()
-
- # This is required for multiprocessing
- def __getstate__(self):
- return {
- "model_name": self.model_name,
- "device": self.device,
- "download_root": self.download_root,
- "models": self.models,
- "compute_type": self.compute_type
- }
-
- def __setstate__(self, state):
- self.model_name = state["model_name"]
- self.device = state["device"]
- self.download_root = state["download_root"]
- self.models = state["models"]
- self.compute_type = state["compute_type"]
- self.model = None
- # Depickled objects must use the global cache
- self.cache = GLOBAL_MODEL_CACHE
\ No newline at end of file
diff --git a/spaces/abhishek/first-order-motion-model/modules/model.py b/spaces/abhishek/first-order-motion-model/modules/model.py
deleted file mode 100644
index 7ee07c0b6357e29a61ede96efcc6ee7b44332bb1..0000000000000000000000000000000000000000
--- a/spaces/abhishek/first-order-motion-model/modules/model.py
+++ /dev/null
@@ -1,259 +0,0 @@
-from torch import nn
-import torch
-import torch.nn.functional as F
-from modules.util import AntiAliasInterpolation2d, make_coordinate_grid
-from torchvision import models
-import numpy as np
-from torch.autograd import grad
-
-
-class Vgg19(torch.nn.Module):
- """
- Vgg19 network for perceptual loss. See Sec 3.3.
- """
- def __init__(self, requires_grad=False):
- super(Vgg19, self).__init__()
- vgg_pretrained_features = models.vgg19(pretrained=True).features
- self.slice1 = torch.nn.Sequential()
- self.slice2 = torch.nn.Sequential()
- self.slice3 = torch.nn.Sequential()
- self.slice4 = torch.nn.Sequential()
- self.slice5 = torch.nn.Sequential()
- for x in range(2):
- self.slice1.add_module(str(x), vgg_pretrained_features[x])
- for x in range(2, 7):
- self.slice2.add_module(str(x), vgg_pretrained_features[x])
- for x in range(7, 12):
- self.slice3.add_module(str(x), vgg_pretrained_features[x])
- for x in range(12, 21):
- self.slice4.add_module(str(x), vgg_pretrained_features[x])
- for x in range(21, 30):
- self.slice5.add_module(str(x), vgg_pretrained_features[x])
-
- self.mean = torch.nn.Parameter(data=torch.Tensor(np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))),
- requires_grad=False)
- self.std = torch.nn.Parameter(data=torch.Tensor(np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))),
- requires_grad=False)
-
- if not requires_grad:
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, X):
- X = (X - self.mean) / self.std
- h_relu1 = self.slice1(X)
- h_relu2 = self.slice2(h_relu1)
- h_relu3 = self.slice3(h_relu2)
- h_relu4 = self.slice4(h_relu3)
- h_relu5 = self.slice5(h_relu4)
- out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
- return out
-
-
-class ImagePyramide(torch.nn.Module):
- """
-    Create an image pyramid for computing the pyramid perceptual loss. See Sec 3.3
- """
- def __init__(self, scales, num_channels):
- super(ImagePyramide, self).__init__()
- downs = {}
- for scale in scales:
- downs[str(scale).replace('.', '-')] = AntiAliasInterpolation2d(num_channels, scale)
- self.downs = nn.ModuleDict(downs)
-
- def forward(self, x):
- out_dict = {}
- for scale, down_module in self.downs.items():
- out_dict['prediction_' + str(scale).replace('-', '.')] = down_module(x)
- return out_dict
-
-
-class Transform:
- """
- Random tps transformation for equivariance constraints. See Sec 3.3
- """
- def __init__(self, bs, **kwargs):
- noise = torch.normal(mean=0, std=kwargs['sigma_affine'] * torch.ones([bs, 2, 3]))
- self.theta = noise + torch.eye(2, 3).view(1, 2, 3)
- self.bs = bs
-
- if ('sigma_tps' in kwargs) and ('points_tps' in kwargs):
- self.tps = True
- self.control_points = make_coordinate_grid((kwargs['points_tps'], kwargs['points_tps']), type=noise.type())
- self.control_points = self.control_points.unsqueeze(0)
- self.control_params = torch.normal(mean=0,
- std=kwargs['sigma_tps'] * torch.ones([bs, 1, kwargs['points_tps'] ** 2]))
- else:
- self.tps = False
-
- def transform_frame(self, frame):
- grid = make_coordinate_grid(frame.shape[2:], type=frame.type()).unsqueeze(0)
- grid = grid.view(1, frame.shape[2] * frame.shape[3], 2)
- grid = self.warp_coordinates(grid).view(self.bs, frame.shape[2], frame.shape[3], 2)
- return F.grid_sample(frame, grid, padding_mode="reflection")
-
- def warp_coordinates(self, coordinates):
- theta = self.theta.type(coordinates.type())
- theta = theta.unsqueeze(1)
- transformed = torch.matmul(theta[:, :, :, :2], coordinates.unsqueeze(-1)) + theta[:, :, :, 2:]
- transformed = transformed.squeeze(-1)
-
- if self.tps:
- control_points = self.control_points.type(coordinates.type())
- control_params = self.control_params.type(coordinates.type())
- distances = coordinates.view(coordinates.shape[0], -1, 1, 2) - control_points.view(1, 1, -1, 2)
- distances = torch.abs(distances).sum(-1)
-
- result = distances ** 2
- result = result * torch.log(distances + 1e-6)
- result = result * control_params
- result = result.sum(dim=2).view(self.bs, coordinates.shape[1], 1)
- transformed = transformed + result
-
- return transformed
-
- def jacobian(self, coordinates):
- new_coordinates = self.warp_coordinates(coordinates)
- grad_x = grad(new_coordinates[..., 0].sum(), coordinates, create_graph=True)
- grad_y = grad(new_coordinates[..., 1].sum(), coordinates, create_graph=True)
- jacobian = torch.cat([grad_x[0].unsqueeze(-2), grad_y[0].unsqueeze(-2)], dim=-2)
- return jacobian
-
-
-def detach_kp(kp):
- return {key: value.detach() for key, value in kp.items()}
-
-
-class GeneratorFullModel(torch.nn.Module):
- """
-    Merge all generator-related updates into a single model for better multi-GPU usage
- """
-
- def __init__(self, kp_extractor, generator, discriminator, train_params):
- super(GeneratorFullModel, self).__init__()
- self.kp_extractor = kp_extractor
- self.generator = generator
- self.discriminator = discriminator
- self.train_params = train_params
- self.scales = train_params['scales']
- self.disc_scales = self.discriminator.scales
- self.pyramid = ImagePyramide(self.scales, generator.num_channels)
- if torch.cuda.is_available():
- self.pyramid = self.pyramid.cuda()
-
- self.loss_weights = train_params['loss_weights']
-
- if sum(self.loss_weights['perceptual']) != 0:
- self.vgg = Vgg19()
- if torch.cuda.is_available():
- self.vgg = self.vgg.cuda()
-
- def forward(self, x):
- kp_source = self.kp_extractor(x['source'])
- kp_driving = self.kp_extractor(x['driving'])
-
- generated = self.generator(x['source'], kp_source=kp_source, kp_driving=kp_driving)
- generated.update({'kp_source': kp_source, 'kp_driving': kp_driving})
-
- loss_values = {}
-
- pyramide_real = self.pyramid(x['driving'])
- pyramide_generated = self.pyramid(generated['prediction'])
-
- if sum(self.loss_weights['perceptual']) != 0:
- value_total = 0
- for scale in self.scales:
- x_vgg = self.vgg(pyramide_generated['prediction_' + str(scale)])
- y_vgg = self.vgg(pyramide_real['prediction_' + str(scale)])
-
- for i, weight in enumerate(self.loss_weights['perceptual']):
- value = torch.abs(x_vgg[i] - y_vgg[i].detach()).mean()
- value_total += self.loss_weights['perceptual'][i] * value
- loss_values['perceptual'] = value_total
-
- if self.loss_weights['generator_gan'] != 0:
- discriminator_maps_generated = self.discriminator(pyramide_generated, kp=detach_kp(kp_driving))
- discriminator_maps_real = self.discriminator(pyramide_real, kp=detach_kp(kp_driving))
- value_total = 0
- for scale in self.disc_scales:
- key = 'prediction_map_%s' % scale
- value = ((1 - discriminator_maps_generated[key]) ** 2).mean()
- value_total += self.loss_weights['generator_gan'] * value
- loss_values['gen_gan'] = value_total
-
- if sum(self.loss_weights['feature_matching']) != 0:
- value_total = 0
- for scale in self.disc_scales:
- key = 'feature_maps_%s' % scale
- for i, (a, b) in enumerate(zip(discriminator_maps_real[key], discriminator_maps_generated[key])):
- if self.loss_weights['feature_matching'][i] == 0:
- continue
- value = torch.abs(a - b).mean()
- value_total += self.loss_weights['feature_matching'][i] * value
- loss_values['feature_matching'] = value_total
-
- if (self.loss_weights['equivariance_value'] + self.loss_weights['equivariance_jacobian']) != 0:
- transform = Transform(x['driving'].shape[0], **self.train_params['transform_params'])
- transformed_frame = transform.transform_frame(x['driving'])
- transformed_kp = self.kp_extractor(transformed_frame)
-
- generated['transformed_frame'] = transformed_frame
- generated['transformed_kp'] = transformed_kp
-
- ## Value loss part
- if self.loss_weights['equivariance_value'] != 0:
- value = torch.abs(kp_driving['value'] - transform.warp_coordinates(transformed_kp['value'])).mean()
- loss_values['equivariance_value'] = self.loss_weights['equivariance_value'] * value
-
- ## jacobian loss part
- if self.loss_weights['equivariance_jacobian'] != 0:
- jacobian_transformed = torch.matmul(transform.jacobian(transformed_kp['value']),
- transformed_kp['jacobian'])
-
- normed_driving = torch.inverse(kp_driving['jacobian'])
- normed_transformed = jacobian_transformed
- value = torch.matmul(normed_driving, normed_transformed)
-
- eye = torch.eye(2).view(1, 1, 2, 2).type(value.type())
-
- value = torch.abs(eye - value).mean()
- loss_values['equivariance_jacobian'] = self.loss_weights['equivariance_jacobian'] * value
-
- return loss_values, generated
-
-
-class DiscriminatorFullModel(torch.nn.Module):
- """
-    Merge all discriminator-related updates into a single model for better multi-GPU usage
- """
-
- def __init__(self, kp_extractor, generator, discriminator, train_params):
- super(DiscriminatorFullModel, self).__init__()
- self.kp_extractor = kp_extractor
- self.generator = generator
- self.discriminator = discriminator
- self.train_params = train_params
- self.scales = self.discriminator.scales
- self.pyramid = ImagePyramide(self.scales, generator.num_channels)
- if torch.cuda.is_available():
- self.pyramid = self.pyramid.cuda()
-
- self.loss_weights = train_params['loss_weights']
-
- def forward(self, x, generated):
- pyramide_real = self.pyramid(x['driving'])
- pyramide_generated = self.pyramid(generated['prediction'].detach())
-
- kp_driving = generated['kp_driving']
- discriminator_maps_generated = self.discriminator(pyramide_generated, kp=detach_kp(kp_driving))
- discriminator_maps_real = self.discriminator(pyramide_real, kp=detach_kp(kp_driving))
-
- loss_values = {}
- value_total = 0
- for scale in self.scales:
- key = 'prediction_map_%s' % scale
- value = (1 - discriminator_maps_real[key]) ** 2 + discriminator_maps_generated[key] ** 2
- value_total += self.loss_weights['discriminator_gan'] * value.mean()
- loss_values['disc_gan'] = value_total
-
- return loss_values
diff --git a/spaces/abhishek/first-order-motion-model/sync_batchnorm/__init__.py b/spaces/abhishek/first-order-motion-model/sync_batchnorm/__init__.py
deleted file mode 100644
index bc8709d92c610b36e0bcbd7da20c1eb41dc8cfcf..0000000000000000000000000000000000000000
--- a/spaces/abhishek/first-order-motion-model/sync_batchnorm/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-# File : __init__.py
-# Author : Jiayuan Mao
-# Email : maojiayuan@gmail.com
-# Date : 27/01/2018
-#
-# This file is part of Synchronized-BatchNorm-PyTorch.
-# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
-# Distributed under MIT License.
-
-from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
-from .replicate import DataParallelWithCallback, patch_replication_callback
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/saconv.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/saconv.py
deleted file mode 100644
index b4ee3978e097fca422805db4e31ae481006d7971..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/saconv.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from annotator.uniformer.mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init
-from annotator.uniformer.mmcv.ops.deform_conv import deform_conv2d
-from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
-
-
-@CONV_LAYERS.register_module(name='SAC')
-class SAConv2d(ConvAWS2d):
- """SAC (Switchable Atrous Convolution)
-
- This is an implementation of SAC in DetectoRS
- (https://arxiv.org/pdf/2006.02334.pdf).
-
- Args:
- in_channels (int): Number of channels in the input image
- out_channels (int): Number of channels produced by the convolution
- kernel_size (int or tuple): Size of the convolving kernel
- stride (int or tuple, optional): Stride of the convolution. Default: 1
- padding (int or tuple, optional): Zero-padding added to both sides of
- the input. Default: 0
- padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
- ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
- dilation (int or tuple, optional): Spacing between kernel elements.
- Default: 1
- groups (int, optional): Number of blocked connections from input
- channels to output channels. Default: 1
- bias (bool, optional): If ``True``, adds a learnable bias to the
- output. Default: ``True``
- use_deform: If ``True``, replace convolution with deformable
- convolution. Default: ``False``.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- bias=True,
- use_deform=False):
- super().__init__(
- in_channels,
- out_channels,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=groups,
- bias=bias)
- self.use_deform = use_deform
- self.switch = nn.Conv2d(
- self.in_channels, 1, kernel_size=1, stride=stride, bias=True)
- self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))
- self.pre_context = nn.Conv2d(
- self.in_channels, self.in_channels, kernel_size=1, bias=True)
- self.post_context = nn.Conv2d(
- self.out_channels, self.out_channels, kernel_size=1, bias=True)
- if self.use_deform:
- self.offset_s = nn.Conv2d(
- self.in_channels,
- 18,
- kernel_size=3,
- padding=1,
- stride=stride,
- bias=True)
- self.offset_l = nn.Conv2d(
- self.in_channels,
- 18,
- kernel_size=3,
- padding=1,
- stride=stride,
- bias=True)
- self.init_weights()
-
- def init_weights(self):
- constant_init(self.switch, 0, bias=1)
- self.weight_diff.data.zero_()
- constant_init(self.pre_context, 0)
- constant_init(self.post_context, 0)
- if self.use_deform:
- constant_init(self.offset_s, 0)
- constant_init(self.offset_l, 0)
-
- def forward(self, x):
- # pre-context
- avg_x = F.adaptive_avg_pool2d(x, output_size=1)
- avg_x = self.pre_context(avg_x)
- avg_x = avg_x.expand_as(x)
- x = x + avg_x
- # switch
- avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')
- avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
- switch = self.switch(avg_x)
- # sac
- weight = self._get_weight(self.weight)
- zero_bias = torch.zeros(
- self.out_channels, device=weight.device, dtype=weight.dtype)
-
- if self.use_deform:
- offset = self.offset_s(avg_x)
- out_s = deform_conv2d(x, offset, weight, self.stride, self.padding,
- self.dilation, self.groups, 1)
- else:
- if (TORCH_VERSION == 'parrots'
- or digit_version(TORCH_VERSION) < digit_version('1.5.0')):
- out_s = super().conv2d_forward(x, weight)
- elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'):
- # bias is a required argument of _conv_forward in torch 1.8.0
- out_s = super()._conv_forward(x, weight, zero_bias)
- else:
- out_s = super()._conv_forward(x, weight)
- ori_p = self.padding
- ori_d = self.dilation
- self.padding = tuple(3 * p for p in self.padding)
- self.dilation = tuple(3 * d for d in self.dilation)
- weight = weight + self.weight_diff
- if self.use_deform:
- offset = self.offset_l(avg_x)
- out_l = deform_conv2d(x, offset, weight, self.stride, self.padding,
- self.dilation, self.groups, 1)
- else:
- if (TORCH_VERSION == 'parrots'
- or digit_version(TORCH_VERSION) < digit_version('1.5.0')):
- out_l = super().conv2d_forward(x, weight)
- elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'):
- # bias is a required argument of _conv_forward in torch 1.8.0
- out_l = super()._conv_forward(x, weight, zero_bias)
- else:
- out_l = super()._conv_forward(x, weight)
-
- out = switch * out_s + (1 - switch) * out_l
- self.padding = ori_p
- self.dilation = ori_d
- # post-context
- avg_x = F.adaptive_avg_pool2d(out, output_size=1)
- avg_x = self.post_context(avg_x)
- avg_x = avg_x.expand_as(out)
- out = out + avg_x
- return out
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
deleted file mode 100644
index 6c154cb3c0d9d7639c3d4a2a1272406d3fab8acd..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
+++ /dev/null
@@ -1,172 +0,0 @@
-import torch.nn as nn
-from mmcv.cnn import ConvModule, normal_init, xavier_init
-
-from mmdet.models.backbones.resnet import Bottleneck
-from mmdet.models.builder import HEADS
-from .bbox_head import BBoxHead
-
-
-class BasicResBlock(nn.Module):
- """Basic residual block.
-
- This block is a little different from the block in the ResNet backbone.
- The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.
-
- Args:
- in_channels (int): Channels of the input feature map.
- out_channels (int): Channels of the output feature map.
- conv_cfg (dict): The config dict for convolution layers.
- norm_cfg (dict): The config dict for normalization layers.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- conv_cfg=None,
- norm_cfg=dict(type='BN')):
- super(BasicResBlock, self).__init__()
-
- # main path
- self.conv1 = ConvModule(
- in_channels,
- in_channels,
- kernel_size=3,
- padding=1,
- bias=False,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg)
- self.conv2 = ConvModule(
- in_channels,
- out_channels,
- kernel_size=1,
- bias=False,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=None)
-
- # identity path
- self.conv_identity = ConvModule(
- in_channels,
- out_channels,
- kernel_size=1,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=None)
-
- self.relu = nn.ReLU(inplace=True)
-
- def forward(self, x):
- identity = x
-
- x = self.conv1(x)
- x = self.conv2(x)
-
- identity = self.conv_identity(identity)
- out = x + identity
-
- out = self.relu(out)
- return out
-
-
-@HEADS.register_module()
-class DoubleConvFCBBoxHead(BBoxHead):
- r"""Bbox head used in Double-Head R-CNN
-
- .. code-block:: none
-
- /-> cls
- /-> shared convs ->
- \-> reg
- roi features
- /-> cls
- \-> shared fc ->
- \-> reg
- """ # noqa: W605
-
- def __init__(self,
- num_convs=0,
- num_fcs=0,
- conv_out_channels=1024,
- fc_out_channels=1024,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- **kwargs):
- kwargs.setdefault('with_avg_pool', True)
- super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
- assert self.with_avg_pool
- assert num_convs > 0
- assert num_fcs > 0
- self.num_convs = num_convs
- self.num_fcs = num_fcs
- self.conv_out_channels = conv_out_channels
- self.fc_out_channels = fc_out_channels
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
-
- # increase the channel of input features
- self.res_block = BasicResBlock(self.in_channels,
- self.conv_out_channels)
-
- # add conv heads
- self.conv_branch = self._add_conv_branch()
- # add fc heads
- self.fc_branch = self._add_fc_branch()
-
- out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
- self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
-
- self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
- self.relu = nn.ReLU(inplace=True)
-
- def _add_conv_branch(self):
-        """Add the conv branch, which consists of a sequence of conv layers."""
- branch_convs = nn.ModuleList()
- for i in range(self.num_convs):
- branch_convs.append(
- Bottleneck(
- inplanes=self.conv_out_channels,
- planes=self.conv_out_channels // 4,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg))
- return branch_convs
-
- def _add_fc_branch(self):
-        """Add the fc branch, which consists of a sequence of fc layers."""
- branch_fcs = nn.ModuleList()
- for i in range(self.num_fcs):
- fc_in_channels = (
- self.in_channels *
- self.roi_feat_area if i == 0 else self.fc_out_channels)
- branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
- return branch_fcs
-
- def init_weights(self):
- # conv layers are already initialized by ConvModule
- normal_init(self.fc_cls, std=0.01)
- normal_init(self.fc_reg, std=0.001)
-
- for m in self.fc_branch.modules():
- if isinstance(m, nn.Linear):
- xavier_init(m, distribution='uniform')
-
- def forward(self, x_cls, x_reg):
- # conv head
- x_conv = self.res_block(x_reg)
-
- for conv in self.conv_branch:
- x_conv = conv(x_conv)
-
- if self.with_avg_pool:
- x_conv = self.avg_pool(x_conv)
-
- x_conv = x_conv.view(x_conv.size(0), -1)
- bbox_pred = self.fc_reg(x_conv)
-
- # fc head
- x_fc = x_cls.view(x_cls.size(0), -1)
- for fc in self.fc_branch:
- x_fc = self.relu(fc(x_fc))
-
- cls_score = self.fc_cls(x_fc)
-
- return cls_score, bbox_pred
diff --git a/spaces/abidlabs/crowd-speech/app.py b/spaces/abidlabs/crowd-speech/app.py
deleted file mode 100644
index cce13a951fb6cb1f8aa0b6b95007c4fe853ed726..0000000000000000000000000000000000000000
--- a/spaces/abidlabs/crowd-speech/app.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import os
-import gradio as gr
-
-HF_TOKEN = os.getenv('HF_TOKEN')
-
-hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-speech-demo2")
-
-iface = gr.Interface(
- lambda x:"Test",
- inputs="mic",
- outputs="textbox",
- title="Crowdsourced Dataset for Speech to Text",
- article="This demo uses facebook/wav2vec2-base-960h for a speech-to-text model. Any data that gets flagged is added to the crowdsourced *dataset* found here: [https://huggingface.co/datasets/abidlabs/crowdsourced-speech-demo2](https://huggingface.co/datasets/abidlabs/crowdsourced-speech-demo2). This Space is experimental, and please only flag data that you are comfortable adding to a public dataset!",
- allow_flagging="manual",
- flagging_callback=hf_writer)
-
-iface.launch()
\ No newline at end of file
diff --git a/spaces/acmyu/frame_interpolation_prototype/trainer.py b/spaces/acmyu/frame_interpolation_prototype/trainer.py
deleted file mode 100644
index 4f5636cc3c8e2c92942da8ecdff273100f34f362..0000000000000000000000000000000000000000
--- a/spaces/acmyu/frame_interpolation_prototype/trainer.py
+++ /dev/null
@@ -1,257 +0,0 @@
-
-import os
-import time
-import torch
-import datetime
-import json
-
-import torch.nn as nn
-from torch.autograd import Variable
-from torchvision.utils import save_image
-
-from sagan_models import Generator, Discriminator
-from utils import *
-import frame_dataset
-
-
-class Trainer(object):
- def __init__(self, data_loader, config):
-
- # Data loader
- self.data_loader = data_loader
-
- # exact model and loss
- self.model = config.model
- self.adv_loss = config.adv_loss
-
- # Model hyper-parameters
- self.imsize = config.imsize
- self.g_num = config.g_num
- self.z_dim = config.z_dim
- self.g_conv_dim = config.g_conv_dim
- self.d_conv_dim = config.d_conv_dim
- self.parallel = config.parallel
-
- self.lambda_gp = config.lambda_gp
- self.total_step = config.total_step
- self.d_iters = config.d_iters
- self.batch_size = config.batch_size
- self.num_workers = config.num_workers
- self.g_lr = config.g_lr
- self.d_lr = config.d_lr
- self.lr_decay = config.lr_decay
- self.beta1 = config.beta1
- self.beta2 = config.beta2
- self.pretrained_model = config.pretrained_model
-
- self.dataset = config.dataset
- self.use_tensorboard = config.use_tensorboard
- self.image_path = config.image_path
- self.log_path = config.log_path
- self.model_save_path = config.model_save_path
- self.sample_path = config.sample_path
- self.log_step = config.log_step
- self.sample_step = config.sample_step
- self.model_save_step = config.model_save_step
- self.version = config.version
-
- # Path
- self.log_path = os.path.join(config.log_path, self.version)
- self.sample_path = os.path.join(config.sample_path, self.version)
- self.model_save_path = os.path.join(config.model_save_path, self.version)
-
- self.build_model()
-
- if self.use_tensorboard:
- self.build_tensorboard()
-
- # Start with trained model
- if self.pretrained_model:
- self.load_pretrained_model()
-
-
-
- def train(self):
-
- # Data iterator
- data_iter = iter(self.data_loader)
- step_per_epoch = len(self.data_loader)
- model_save_step = int(self.model_save_step * step_per_epoch)
-
- print('steps per epoch: '+str(step_per_epoch))
-
-
- # Fixed input for debugging
- fixed_z = tensor2var(torch.randn(self.batch_size, self.z_dim))
- if self.dataset == 'frames':
- fixed_data_loader = torch.utils.data.DataLoader(dataset=frame_dataset.FrameDataset(128, 'test'), batch_size=self.batch_size, shuffle=False, num_workers=0, drop_last=False)
- fixed_iter = iter(fixed_data_loader)
- imgs, _ = next(fixed_iter)
- fixed_z = getFrames(imgs)
- save_image(fixed_z.data, os.path.join(self.sample_path, '0real.png'))
-
- # Start with trained model
- if self.pretrained_model:
- start = self.pretrained_model + 1
- else:
- start = 0
-
- metrics = []
- if start > 0:
- with open("metrics.json") as metricsfile:
- metrics = json.load(metricsfile)
-
- # Start time
- start_time = time.time()
- for step in range(start, self.total_step):
-
- # ================== Train D ================== #
- self.D.train()
- self.G.train()
-
- try:
- real_images, _ = next(data_iter)
- except:
- data_iter = iter(self.data_loader)
- real_images, _ = next(data_iter)
-
- if self.dataset == 'frames':
- real_images = getFrames(real_images)
-
- # Compute loss with real images
- # dr1, dr2, df1, df2, gf1, gf2 are attention scores
- real_images = tensor2var(real_images)
- d_out_real,dr1,dr2 = self.D(real_images)
- if self.adv_loss == 'wgan-gp':
- d_loss_real = - torch.mean(d_out_real)
- elif self.adv_loss == 'hinge':
- d_loss_real = torch.nn.ReLU()(1.0 - d_out_real).mean()
-
-            # Generate fake images from the real frames and score them with D
- z = tensor2var(torch.randn(real_images.size(0), self.z_dim))
- fake_images,gf1,gf2 = self.G(real_images)
- d_out_fake,df1,df2 = self.D(fake_images)
-
- if self.adv_loss == 'wgan-gp':
- d_loss_fake = d_out_fake.mean()
- elif self.adv_loss == 'hinge':
- d_loss_fake = torch.nn.ReLU()(1.0 + d_out_fake).mean()
-
-
- # Backward + Optimize
- d_loss = d_loss_real + d_loss_fake
- self.reset_grad()
- d_loss.backward()
- self.d_optimizer.step()
-
-
- if self.adv_loss == 'wgan-gp':
- # Compute gradient penalty
- alpha = torch.rand(real_images.size(0), 1, 1, 1).cuda().expand_as(real_images)
- interpolated = Variable(alpha * real_images.data + (1 - alpha) * fake_images.data, requires_grad=True)
- out,_,_ = self.D(interpolated)
-
- grad = torch.autograd.grad(outputs=out,
- inputs=interpolated,
- grad_outputs=torch.ones(out.size()).cuda(),
- retain_graph=True,
- create_graph=True,
- only_inputs=True)[0]
-
- grad = grad.view(grad.size(0), -1)
- grad_l2norm = torch.sqrt(torch.sum(grad ** 2, dim=1))
- d_loss_gp = torch.mean((grad_l2norm - 1) ** 2)
-
- # Backward + Optimize
- d_loss = self.lambda_gp * d_loss_gp
-
- self.reset_grad()
- d_loss.backward()
- self.d_optimizer.step()
-
-            # ================== Train G ================== #
- # Create random noise
- z = tensor2var(torch.randn(real_images.size(0), self.z_dim))
- fake_images,_,_ = self.G(real_images)
-
- # Compute loss with fake images
- g_out_fake,_,_ = self.D(fake_images) # batch x n
- if self.adv_loss == 'wgan-gp':
- g_loss_fake = - g_out_fake.mean()
- elif self.adv_loss == 'hinge':
- g_loss_fake = - g_out_fake.mean()
-
- self.reset_grad()
- g_loss_fake.backward()
- self.g_optimizer.step()
-
-
- # Print out log info
- if (step + 1) % self.log_step == 0:
- elapsed = time.time() - start_time
- elapsed = str(datetime.timedelta(seconds=elapsed))
- print("Elapsed [{}], G_step [{}/{}], D_step[{}/{}], d_out_real: {:.4f}, "
- " d_out_fake: {:.4f}, g_loss: {:.4f}".
- format(elapsed, step + 1, self.total_step, (step + 1),
- self.total_step , d_loss_real.data,
- d_loss_fake.data, g_loss_fake.data ))
-
- # Sample images
- if (step + 1) % self.sample_step == 0:
- fake_images,_,_= self.G(fixed_z)
- save_image(fake_images.data,
- os.path.join(self.sample_path, '{}_fake.png'.format(step + 1)))
-
- metric = {}
- metric['epoch'] = float(step)/float(step_per_epoch)
- metric['step'] = step+1
- metric['g_loss'] = float(g_loss_fake.data)
- metric['real_loss'] = float(d_loss_real.data)
- metric['fake_loss'] = float(d_loss_fake.data)
- #metric['ssim'] = similarity
- metrics.append(metric)
- with open("metrics.json", "w") as outfile:
- json.dump(metrics, outfile)
-
- if (step+1) % model_save_step==0:
- torch.save(self.G.state_dict(),
- os.path.join(self.model_save_path, '{}_G.pth'.format(step + 1)))
- torch.save(self.D.state_dict(),
- os.path.join(self.model_save_path, '{}_D.pth'.format(step + 1)))
-
- def build_model(self):
-
- self.G = Generator(self.batch_size,self.imsize, self.z_dim, self.g_conv_dim).cuda()
- self.D = Discriminator(self.batch_size,self.imsize, self.d_conv_dim).cuda()
- if self.parallel:
- self.G = nn.DataParallel(self.G)
- self.D = nn.DataParallel(self.D)
-
- # Loss and optimizer
- # self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])
- self.g_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.G.parameters()), self.g_lr, [self.beta1, self.beta2])
- self.d_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.D.parameters()), self.d_lr, [self.beta1, self.beta2])
-
- self.c_loss = torch.nn.CrossEntropyLoss()
- # print networks
- print(self.G)
- print(self.D)
-
- def build_tensorboard(self):
- from logger import Logger
- self.logger = Logger(self.log_path)
-
- def load_pretrained_model(self):
- self.G.load_state_dict(torch.load(os.path.join(
- self.model_save_path, '{}_G.pth'.format(self.pretrained_model))))
- self.D.load_state_dict(torch.load(os.path.join(
- self.model_save_path, '{}_D.pth'.format(self.pretrained_model))))
- print('loaded trained models (step: {})..!'.format(self.pretrained_model))
-
- def reset_grad(self):
- self.d_optimizer.zero_grad()
- self.g_optimizer.zero_grad()
-
- def save_sample(self, data_iter):
- real_images, _ = next(data_iter)
- save_image(real_images, os.path.join(self.sample_path, 'real.png'))
diff --git a/spaces/aichina/youtube-whisper-09/README.md b/spaces/aichina/youtube-whisper-09/README.md
deleted file mode 100644
index 91b82badfea9c568fc1bb4412fe9c5981ef25988..0000000000000000000000000000000000000000
--- a/spaces/aichina/youtube-whisper-09/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Youtube Whisper
-emoji: ⚡
-colorFrom: green
-colorTo: red
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: unknown
-duplicated_from: kazuk/youtube-whisper-09
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/aieye/named_entity_recognition_tutorial/utils/login.py b/spaces/aieye/named_entity_recognition_tutorial/utils/login.py
deleted file mode 100644
index 81d833b9044a5aee42af4674a9fb06d5a39c9201..0000000000000000000000000000000000000000
--- a/spaces/aieye/named_entity_recognition_tutorial/utils/login.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import streamlit as st
-import os
-import requests
-import json
-
-# Authenticate users against the Firebase Auth REST API using the web API key
-
-
-def initialize_login():
- if "login" not in st.session_state:
- st.columns(3)[1].image("assets/logo.png")
- username = st.text_input("Username")
- password = st.text_input("Password", type="password")
-
- if st.button("Login"):
- authorized = authenticate(username, password)
- if authorized["status"]:
- st.session_state["login"] = authorized
- os.makedirs(
- os.path.join(".sessions", st.session_state["login"]["username"]),
- exist_ok=True,
- )
- st.success("Login Successful!")
- st.experimental_rerun()
- else:
- st.error("Invalid username or password")
- else:
- st.sidebar.success(f'Hello, {st.session_state["login"]["Name"]}!')
- st.sidebar.image("assets/logo.png", use_column_width=True)
-
-
-def authenticate(username, password):
- FIREBASE_WEB_API_KEY = os.environ.get("FIREBASE_WEB_API_KEY")
- payload = json.dumps(
- {
- "email": f"{username}@aieye.com",
- "password": password,
- "returnSecureToken": False,
- }
- )
-
- rest_api_url = (
- f"https://identitytoolkit.googleapis.com/v1/accounts:signInWithPassword"
- )
- r = requests.post(rest_api_url, params={"key": FIREBASE_WEB_API_KEY}, data=payload)
- print(r.json())
-
- if r.status_code == 200:
- return {
- "status": True,
- "Name": " ".join(username.split("_")),
- "username": username,
- }
- else:
- return {"status": False}
-
-
-def get_login():
- return st.session_state.get("login", {"status": False})
diff --git a/spaces/akhaliq/EimisAnimeDiffusion_1.0v/utils.py b/spaces/akhaliq/EimisAnimeDiffusion_1.0v/utils.py
deleted file mode 100644
index 5a46ead5bae7a2bbe4e4dc2e037a810cf21c0ceb..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/EimisAnimeDiffusion_1.0v/utils.py
+++ /dev/null
@@ -1,6 +0,0 @@
-def is_google_colab():
- try:
- import google.colab
- return True
- except:
- return False
diff --git a/spaces/akhaliq/GPEN/sr_model/real_esrnet.py b/spaces/akhaliq/GPEN/sr_model/real_esrnet.py
deleted file mode 100644
index fdf86641e6526b15250a6faa004a277d0c5f827b..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/GPEN/sr_model/real_esrnet.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import os
-import torch
-import numpy as np
-from rrdbnet_arch import RRDBNet
-from torch.nn import functional as F
-
-class RealESRNet(object):
- def __init__(self, base_dir='./', model=None, scale=2, device='cuda'):
- self.base_dir = base_dir
- self.scale = scale
- self.device = device
- self.load_srmodel(base_dir, model)
-
- def load_srmodel(self, base_dir, model):
- self.srmodel = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=32, num_block=23, num_grow_ch=32, scale=self.scale)
- if model is None:
- loadnet = torch.load(os.path.join(self.base_dir, 'weights', 'rrdb_realesrnet_psnr.pth'))
- else:
- loadnet = torch.load(os.path.join(self.base_dir, 'weights', model+'.pth'))
- #print(loadnet['params_ema'].keys)
- self.srmodel.load_state_dict(loadnet['params_ema'], strict=True)
- self.srmodel.eval()
- self.srmodel = self.srmodel.to(self.device)
-
- def process(self, img):
- img = img.astype(np.float32) / 255.
- img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
- img = img.unsqueeze(0).to(self.device)
-
- if self.scale == 2:
- mod_scale = 2
- elif self.scale == 1:
- mod_scale = 4
- else:
- mod_scale = None
- if mod_scale is not None:
- h_pad, w_pad = 0, 0
- _, _, h, w = img.size()
- if (h % mod_scale != 0):
- h_pad = (mod_scale - h % mod_scale)
- if (w % mod_scale != 0):
- w_pad = (mod_scale - w % mod_scale)
- img = F.pad(img, (0, w_pad, 0, h_pad), 'reflect')
-
- try:
- with torch.no_grad():
- output = self.srmodel(img)
- # remove extra pad
- if mod_scale is not None:
- _, _, h, w = output.size()
- output = output[:, :, 0:h - h_pad, 0:w - w_pad]
- output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
- output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))
- output = (output * 255.0).round().astype(np.uint8)
-
- return output
- except:
- return None
\ No newline at end of file
diff --git a/spaces/akhaliq/Music_Source_Separation/bytesep/dataset_creation/pack_audios_to_hdf5s/voicebank-demand.py b/spaces/akhaliq/Music_Source_Separation/bytesep/dataset_creation/pack_audios_to_hdf5s/voicebank-demand.py
deleted file mode 100644
index 7e166cea948c6458faa78740a8297112e17f74ec..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Music_Source_Separation/bytesep/dataset_creation/pack_audios_to_hdf5s/voicebank-demand.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import argparse
-import os
-import pathlib
-import time
-from concurrent.futures import ProcessPoolExecutor
-from typing import List, NoReturn
-
-import h5py
-import numpy as np
-
-from bytesep.utils import float32_to_int16, load_audio
-
-
-def pack_audios_to_hdf5s(args) -> NoReturn:
- r"""Pack (resampled) audio files into hdf5 files to speed up loading.
-
- Args:
- dataset_dir: str
- split: str, 'train' | 'test'
- hdf5s_dir: str, directory to write out hdf5 files
- sample_rate: int
- channels_num: int
- mono: bool
-
- Returns:
- NoReturn
- """
-
- # arguments & parameters
- dataset_dir = args.dataset_dir
- split = args.split
- hdf5s_dir = args.hdf5s_dir
- sample_rate = args.sample_rate
- channels = args.channels
- mono = True if channels == 1 else False
-
- # Only pack data for training data.
- assert split == "train"
-
- speech_dir = os.path.join(dataset_dir, "clean_{}set_wav".format(split))
- mixture_dir = os.path.join(dataset_dir, "noisy_{}set_wav".format(split))
-
- os.makedirs(hdf5s_dir, exist_ok=True)
-
- # Read names.
- audio_names = sorted(os.listdir(speech_dir))
-
- params = []
-
- for audio_index, audio_name in enumerate(audio_names):
-
- speech_path = os.path.join(speech_dir, audio_name)
- mixture_path = os.path.join(mixture_dir, audio_name)
-
- hdf5_path = os.path.join(
- hdf5s_dir, "{}.h5".format(pathlib.Path(audio_name).stem)
- )
-
- param = (
- audio_index,
- audio_name,
- speech_path,
- mixture_path,
- mono,
- sample_rate,
- hdf5_path,
- )
- params.append(param)
-
- # Uncomment for debug.
- # write_single_audio_to_hdf5(params[0])
- # os._exit(0)
-
- pack_hdf5s_time = time.time()
-
- with ProcessPoolExecutor(max_workers=None) as pool:
-        # Use the maximum number of workers available on the machine
- pool.map(write_single_audio_to_hdf5, params)
-
- print("Pack hdf5 time: {:.3f} s".format(time.time() - pack_hdf5s_time))
-
-
-def write_single_audio_to_hdf5(param: List) -> NoReturn:
- r"""Write single audio into hdf5 file."""
-
- (
- audio_index,
- audio_name,
- speech_path,
- mixture_path,
- mono,
- sample_rate,
- hdf5_path,
- ) = param
-
- with h5py.File(hdf5_path, "w") as hf:
-
- hf.attrs.create("audio_name", data=audio_name, dtype="S100")
- hf.attrs.create("sample_rate", data=sample_rate, dtype=np.int32)
-
- speech = load_audio(audio_path=speech_path, mono=mono, sample_rate=sample_rate)
- # speech: (channels_num, audio_samples)
-
- mixture = load_audio(
- audio_path=mixture_path, mono=mono, sample_rate=sample_rate
- )
- # mixture: (channels_num, audio_samples)
-
- noise = mixture - speech
- # noise: (channels_num, audio_samples)
-
- hf.create_dataset(name='speech', data=float32_to_int16(speech), dtype=np.int16)
- hf.create_dataset(name='noise', data=float32_to_int16(noise), dtype=np.int16)
-
- print('{} Write hdf5 to {}'.format(audio_index, hdf5_path))
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- "--dataset_dir",
- type=str,
- required=True,
- help="Directory of the Voicebank-Demand dataset.",
- )
- parser.add_argument("--split", type=str, required=True, choices=["train", "test"])
- parser.add_argument(
- "--hdf5s_dir",
- type=str,
- required=True,
- help="Directory to write out hdf5 files.",
- )
- parser.add_argument("--sample_rate", type=int, required=True, help="Sample rate.")
- parser.add_argument(
- "--channels", type=int, required=True, help="Use 1 for mono, 2 for stereo."
- )
-
- # Parse arguments.
- args = parser.parse_args()
-
- # Pack audios into hdf5 files.
- pack_audios_to_hdf5s(args)
diff --git a/spaces/akhaliq/RealBasicVSR/README.md b/spaces/akhaliq/RealBasicVSR/README.md
deleted file mode 100644
index 6f5f3a8b32486d6f907d8d3072175d2011a2ac51..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/RealBasicVSR/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: RealBasicVSR
-emoji: 🏢
-colorFrom: blue
-colorTo: green
-sdk: gradio
-sdk_version: 2.8.9
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/akhaliq/speechbrain-speech-seperation/README.md b/spaces/akhaliq/speechbrain-speech-seperation/README.md
deleted file mode 100644
index f105ddef25d94aa07f533afe1420d499b3f48fba..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/speechbrain-speech-seperation/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Speechbrain-speech-seperation
-emoji: 👁
-colorFrom: red
-colorTo: gray
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/index/__init__.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/index/__init__.py
deleted file mode 100644
index 7a17b7b3b6ad49157ee41f3da304fec3d32342d3..0000000000000000000000000000000000000000
--- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/index/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-"""Index interaction code
-"""
diff --git a/spaces/allknowingroger/Image-Models-Test79/README.md b/spaces/allknowingroger/Image-Models-Test79/README.md
deleted file mode 100644
index 827425b75bed17c6e0f887833228dad24294d0ee..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test79/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: More Image Models
-emoji: 😻
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: true
-duplicated_from: allknowingroger/Image-Models-Test78
----
-
-
\ No newline at end of file
diff --git a/spaces/amitjamadagni/qs-benchmarks/plot_scripts/map_packages_colors_mgpu.py b/spaces/amitjamadagni/qs-benchmarks/plot_scripts/map_packages_colors_mgpu.py
deleted file mode 100644
index 15cef59d05bb67a92f02bc1579c80af57c00bdf8..0000000000000000000000000000000000000000
--- a/spaces/amitjamadagni/qs-benchmarks/plot_scripts/map_packages_colors_mgpu.py
+++ /dev/null
@@ -1,119 +0,0 @@
-import matplotlib.pyplot as plt
-import numpy as np
-from matplotlib import rc
-import matplotlib.ticker as ticker
-from matplotlib.ticker import MaxNLocator
-# from matplotlib import pyplot
-
-fig_width_pt = 246.0 # Get this from LaTeX using \showthe\columnwidth
-inches_per_pt = 1.0/72.27 # Convert pt to inch
-golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
-fig_width = fig_width_pt*inches_per_pt # width in inches
-fig_height = fig_width*golden_mean # height in inches
-# fig_size = [fig_width+1.25,fig_height+1.25]
-# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
-params = {'backend': 'ps',
- 'axes.labelsize': 14,
- 'axes.titlesize': 12,
- 'font.size': 8,
- 'legend.fontsize': 12,
- 'xtick.labelsize': 14,
- 'ytick.labelsize': 14,}
-# 'text.usetex': True}
-# 'figure.figsize': fig_size}
-# plt.rc('text.latex', preamble=r'\usepackage{braket}')
-plt.rcParams.update(params)
-
-# cm = plt.get_cmap('tab20')
-# n_colors = 20
-# x_arr = [cm(1.*i/n_colors) for i in range(n_colors)]
-# s_arr = ["o", "*", "s", "^", "D", "v"]
-# s_arr = Line2D.filled_markers*100
-
-x_arr = ['grey', 'indianred', 'thistle', 'red', 'saddlebrown', 'peru', 'darkorange', 'gold', 'darkkhaki', 'limegreen', 'darkslategray', 'deepskyblue', 'mediumpurple', 'darkorchid', 'magenta', 'aqua', 'lightgreen', 'lightcoral', 'chocolate', 'pink', 'darkmagenta', 'lightsalmon', 'darkcyan', 'tan']
-
-s_arr = ['o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd', 'P', 'X', '+', '2', '4']*50
-
-pkg_str = ['cirq', 'hybridq', 'intel_qs_cpp', 'pennylane_l', 'projectq', 'qcgpu', 'qibojit', 'qsimcirq', 'quest', 'svsim', 'yao', 'hiq', 'pennylane', 'qibo', 'qiskit', 'qrack_sch', 'qulacs', 'cuquantum_qiskit', 'cuquantum_qsimcirq', 'qpanda', 'qpp', 'myqlm', 'myqlm_cpp', 'braket']
-
-task = ['hdyn', 'rqc', 'qft']
-
-com_cap = ['singlethread', 'multithread', 'gpu']
-
-prec = ['sp', 'dp']
-
-storage_dict = {}
-for pkg in pkg_str:
- storage_dict.update({pkg:pkg})
-
-label_dict = {}
-for pkg in pkg_str:
- for t in task:
- for cc in com_cap:
- for p in prec:
- label_dict.update({pkg+'_'+t+'_'+cc+'_'+p:pkg})
-
-
-for t in task:
- for p in prec:
- for ngpu in [1, 2, 4, 8]:
- label_dict.update({'cuquantum_qiskit_{}_gpu_{}_{}'.format(t, ngpu, p):'[{}]cuquantum(qiskit)'.format(ngpu)})
-
-for t in task:
- for p in prec:
- for ngpu in [1, 2, 4, 8]:
- label_dict.update({'cuquantum_qsimcirq_{}_gpu_{}_{}'.format(t, ngpu, p):'[{}]cuquantum(qsimcirq)'.format(ngpu)})
-
-for t in task:
- for p in prec:
- for ngpu in [1, 2, 4, 8]:
- label_dict.update({'qibojit_{}_gpu_{}_{}'.format(t, ngpu, p):'[{}]qibojit'.format(ngpu)})
-
-color_dict = {}
-n_c = 0
-for pkg in pkg_str:
- for t in task:
- for cc in com_cap:
- for p in prec:
- color_dict.update({pkg+'_'+t+'_'+cc+'_'+p:x_arr[n_c]})
- n_c = n_c + 1
-
-for t in task:
- for p in prec:
- for ngpu in [1, 2, 4, 8]:
- color_dict.update({'cuquantum_qiskit_{}_gpu_{}_{}'.format(t, ngpu, p):color_dict['cuquantum_qiskit_{}_gpu_{}'.format(t, p)]})
-
-for t in task:
- for p in prec:
- for ngpu in [1, 2, 4, 8]:
- color_dict.update({'cuquantum_qsimcirq_{}_gpu_{}_{}'.format(t, ngpu, p):color_dict['cuquantum_qsimcirq_{}_gpu_{}'.format(t, p)]})
-
-for t in task:
- for p in prec:
- for ngpu in [1, 2, 4, 8]:
- color_dict.update({'qibojit_{}_gpu_{}_{}'.format(t, ngpu, p):color_dict['qibojit_{}_gpu_{}'.format(t, p)]})
-
-symbol_dict = {}
-n_s = 0
-for pkg in pkg_str:
- for t in task:
- for cc in com_cap:
- for p in prec:
- symbol_dict.update({pkg+'_'+t+'_'+cc+'_'+p:s_arr[n_s]})
- n_s = n_s + 1
-
-m_arr = ['v', '^', '<', '>']
-for t in task:
- for p in prec:
- for ind, ngpu in enumerate([1, 2, 4, 8]):
- symbol_dict.update({'cuquantum_qiskit_{}_gpu_{}_{}'.format(t, ngpu, p):m_arr[ind]})
-
-for t in task:
- for p in prec:
- for ind, ngpu in enumerate([1, 2, 4, 8]):
- symbol_dict.update({'cuquantum_qsimcirq_{}_gpu_{}_{}'.format(t, ngpu, p):m_arr[ind]})
-
-for t in task:
- for p in prec:
- for ind, ngpu in enumerate([1, 2, 4, 8]):
- symbol_dict.update({'qibojit_{}_gpu_{}_{}'.format(t, ngpu, p):m_arr[ind]})
diff --git a/spaces/andersab/QuijoBERT/app.py b/spaces/andersab/QuijoBERT/app.py
deleted file mode 100644
index f9880a37d7b2ce141d51e004a0082a8e7d2e3622..0000000000000000000000000000000000000000
--- a/spaces/andersab/QuijoBERT/app.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import gradio as gr
-
-from transformers import pipeline
-
-fill_mask = pipeline("fill-mask", model="./QuijoBERT", tokenizer = './QuijoBERT')
-
-def predict(text):
-
- res_dict = {}
- x = fill_mask(text)
-    print(x)
- for i in range(len(x)):
- k = x[i]['sequence']
- e = x[i]['score']
- print(k, e)
- if e >= 0.05:
- res_dict[k] = e
- print (res_dict)
- return res_dict
- #return {x[0]["sequence"], x[0]["score"]}
-
-# texto = 'en un lugar de la '
-# print(predict(texto))
-
-iface = gr.Interface(
- fn=predict,
- inputs='text',
- outputs ='label',
- examples=['En un lugar de la ', 'En verdad, Sancho', 'Cómo has estado, bien mío, de mis ojos, compañero mío']
-)
-
-
-iface.launch()
\ No newline at end of file
diff --git a/spaces/aravind123456789/OPENAIAPP/app.py b/spaces/aravind123456789/OPENAIAPP/app.py
deleted file mode 100644
index b81bf231e223eb1c9eb3da9d54ed240adfac4297..0000000000000000000000000000000000000000
--- a/spaces/aravind123456789/OPENAIAPP/app.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import os
-import gradio as gr
-from langchain.chat_models import ChatOpenAI
-from langchain import LLMChain, PromptTemplate
-from langchain.memory import ConversationBufferMemory
-
-OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
-
-template = """You are an enthusiastic high school student passionate about science and exploration. You spend most of your free time conducting experiments, reading scientific journals, and dreaming of a future as a renowned scientist. Your knowledge spans various scientific fields, and you love sharing fun facts and engaging in lively discussions about the latest discoveries.
-{chat_history}
-User: {user_message}
-Chatbot:"""
-
-prompt = PromptTemplate(
- input_variables=["chat_history", "user_message"], template=template
-)
-
-memory = ConversationBufferMemory(memory_key="chat_history")
-
-llm_chain = LLMChain(
-    llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
- prompt=prompt,
- verbose=True,
- memory=memory,
-)
-
-def get_text_response(user_message,history):
- response = llm_chain.predict(user_message = user_message)
- return response
-
-demo = gr.ChatInterface(get_text_response)
-
-if __name__ == "__main__":
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
diff --git a/spaces/arbml/Ashaar/poetry_diacritizer/config_manager.py b/spaces/arbml/Ashaar/poetry_diacritizer/config_manager.py
deleted file mode 100644
index 4473d6017694823444543bc86d7d9e8d0dee6aba..0000000000000000000000000000000000000000
--- a/spaces/arbml/Ashaar/poetry_diacritizer/config_manager.py
+++ /dev/null
@@ -1,350 +0,0 @@
-from enum import Enum
-import os
-from pathlib import Path
-import shutil
-import subprocess
-from typing import Any, Dict
-
-import ruamel.yaml
-import torch
-
-from poetry_diacritizer.models.baseline import BaseLineModel
-from poetry_diacritizer.models.cbhg import CBHGModel
-from poetry_diacritizer.models.gpt import GPTModel
-from poetry_diacritizer.models.seq2seq import Decoder as Seq2SeqDecoder, Encoder as Seq2SeqEncoder, Seq2Seq
-from poetry_diacritizer.models.tacotron_based import (
- Decoder as TacotronDecoder,
- Encoder as TacotronEncoder,
- Tacotron,
-)
-
-from poetry_diacritizer.options import AttentionType, LossType, OptimizerType
-from poetry_diacritizer.util.text_encoders import (
- ArabicEncoderWithStartSymbol,
- BasicArabicEncoder,
- TextEncoder,
-)
-
-
-class ConfigManager:
- """Co/home/almodhfer/Projects/daicritization/temp_results/CA_MSA/cbhg-new/model-10.ptnfig Manager"""
-
- def __init__(self, config_path: str, model_kind: str):
- available_models = ["baseline", "cbhg", "seq2seq", "tacotron_based", "gpt"]
- if model_kind not in available_models:
- raise TypeError(f"model_kind must be in {available_models}")
- self.config_path = Path(config_path)
- self.model_kind = model_kind
- self.yaml = ruamel.yaml.YAML()
- self.config: Dict[str, Any] = self._load_config()
- self.git_hash = self._get_git_hash()
- self.session_name = ".".join(
- [
- self.config["data_type"],
- self.config["session_name"],
- f"{model_kind}",
- ]
- )
-
- self.data_dir = Path(
- os.path.join(self.config["data_directory"], self.config["data_type"])
- )
- self.base_dir = Path(
- os.path.join(self.config["log_directory"], self.session_name)
- )
- self.log_dir = Path(os.path.join(self.base_dir, "logs"))
- self.prediction_dir = Path(os.path.join(self.base_dir, "predictions"))
- self.plot_dir = Path(os.path.join(self.base_dir, "plots"))
- self.models_dir = Path(os.path.join(self.base_dir, "models"))
- if "sp_model_path" in self.config:
- self.sp_model_path = self.config["sp_model_path"]
- else:
- self.sp_model_path = None
- self.text_encoder: TextEncoder = self.get_text_encoder()
- self.config["len_input_symbols"] = len(self.text_encoder.input_symbols)
- self.config["len_target_symbols"] = len(self.text_encoder.target_symbols)
- if self.model_kind in ["seq2seq", "tacotron_based"]:
- self.config["attention_type"] = AttentionType[self.config["attention_type"]]
- self.config["optimizer"] = OptimizerType[self.config["optimizer_type"]]
-
- def _load_config(self):
- with open(self.config_path, "rb") as model_yaml:
- _config = self.yaml.load(model_yaml)
- return _config
-
- @staticmethod
- def _get_git_hash():
- try:
- return (
- subprocess.check_output(["git", "describe", "--always"])
- .strip()
- .decode()
- )
- except Exception as e:
- print(f"WARNING: could not retrieve git hash. {e}")
-
- def _check_hash(self):
- try:
- git_hash = (
- subprocess.check_output(["git", "describe", "--always"])
- .strip()
- .decode()
- )
- if self.config["git_hash"] != git_hash:
- print(
- f"""WARNING: git hash mismatch. Current: {git_hash}.
- Config hash: {self.config['git_hash']}"""
- )
- except Exception as e:
- print(f"WARNING: could not check git hash. {e}")
-
- @staticmethod
- def _print_dict_values(values, key_name, level=0, tab_size=2):
- tab = level * tab_size * " "
- print(tab + "-", key_name, ":", values)
-
- def _print_dictionary(self, dictionary, recursion_level=0):
- for key in dictionary.keys():
- if isinstance(key, dict):
- recursion_level += 1
- self._print_dictionary(dictionary[key], recursion_level)
- else:
- self._print_dict_values(
- dictionary[key], key_name=key, level=recursion_level
- )
-
- def print_config(self):
- print("\nCONFIGURATION", self.session_name)
- self._print_dictionary(self.config)
-
- def update_config(self):
- self.config["git_hash"] = self._get_git_hash()
-
- def dump_config(self):
- self.update_config()
- _config = {}
- for key, val in self.config.items():
- if isinstance(val, Enum):
- _config[key] = val.name
- else:
- _config[key] = val
- with open(self.base_dir / "config.yml", "w") as model_yaml:
- self.yaml.dump(_config, model_yaml)
-
- def create_remove_dirs(
- self,
- clear_dir: bool = False,
- clear_logs: bool = False,
- clear_weights: bool = False,
- clear_all: bool = False,
- ):
- self.base_dir.mkdir(exist_ok=True, parents=True)
- self.plot_dir.mkdir(exist_ok=True)
- self.prediction_dir.mkdir(exist_ok=True)
- if clear_dir:
- delete = input(f"Delete {self.log_dir} AND {self.models_dir}? (y/[n])")
- if delete == "y":
- shutil.rmtree(self.log_dir, ignore_errors=True)
- shutil.rmtree(self.models_dir, ignore_errors=True)
- if clear_logs:
- delete = input(f"Delete {self.log_dir}? (y/[n])")
- if delete == "y":
- shutil.rmtree(self.log_dir, ignore_errors=True)
- if clear_weights:
- delete = input(f"Delete {self.models_dir}? (y/[n])")
- if delete == "y":
- shutil.rmtree(self.models_dir, ignore_errors=True)
- self.log_dir.mkdir(exist_ok=True)
- self.models_dir.mkdir(exist_ok=True)
-
- def get_last_model_path(self):
- """
- Given a checkpoint, get the last save model name
- Args:
- checkpoint (str): the path where models are saved
- """
- models = os.listdir(self.models_dir)
- models = [model for model in models if model[-3:] == ".pt"]
- if len(models) == 0:
- return None
- _max = max(int(m.split(".")[0].split("-")[0]) for m in models)
- model_name = f"{_max}-snapshot.pt"
- last_model_path = os.path.join(self.models_dir, model_name)
-
- return last_model_path
-
- def load_model(self, model_path: str = None):
- """
- loading a model from path
- Args:
- checkpoint (str): the path to the model
- name (str): the name of the model, which is in the path
- model (Tacotron): the model to load its save state
- optimizer: the optimizer to load its saved state
- """
-
- model = self.get_model()
-
- with open(self.base_dir / f"{self.model_kind}_network.txt", "w") as file:
- file.write(str(model))
-
- if model_path is None:
- last_model_path = self.get_last_model_path()
- if last_model_path is None:
- return model, 1
- else:
- last_model_path = model_path
-
- saved_model = torch.load(last_model_path)
- out = model.load_state_dict(saved_model["model_state_dict"])
- print(out)
- global_step = saved_model["global_step"] + 1
- return model, global_step
-
- def get_model(self, ignore_hash=False):
- if not ignore_hash:
- self._check_hash()
- if self.model_kind == "cbhg":
- return self.get_cbhg()
-
- elif self.model_kind == "seq2seq":
- return self.get_seq2seq()
-
- elif self.model_kind == "tacotron_based":
- return self.get_tacotron_based()
-
- elif self.model_kind == "baseline":
- return self.get_baseline()
-
- elif self.model_kind == "gpt":
- return self.get_gpt()
-
- def get_gpt(self):
- model = GPTModel(
- self.config["base_model_path"],
- freeze=self.config["freeze"],
- n_layer=self.config["n_layer"],
- use_lstm=self.config["use_lstm"],
- )
- return model
-
- def get_baseline(self):
- model = BaseLineModel(
- embedding_dim=self.config["embedding_dim"],
- inp_vocab_size=self.config["len_input_symbols"],
- targ_vocab_size=self.config["len_target_symbols"],
- layers_units=self.config["layers_units"],
- use_batch_norm=self.config["use_batch_norm"],
- )
-
- return model
-
- def get_cbhg(self):
- model = CBHGModel(
- embedding_dim=self.config["embedding_dim"],
- inp_vocab_size=self.config["len_input_symbols"],
- targ_vocab_size=self.config["len_target_symbols"],
- use_prenet=self.config["use_prenet"],
- prenet_sizes=self.config["prenet_sizes"],
- cbhg_gru_units=self.config["cbhg_gru_units"],
- cbhg_filters=self.config["cbhg_filters"],
- cbhg_projections=self.config["cbhg_projections"],
- post_cbhg_layers_units=self.config["post_cbhg_layers_units"],
- post_cbhg_use_batch_norm=self.config["post_cbhg_use_batch_norm"],
- )
-
- return model
-
- def get_seq2seq(self):
- encoder = Seq2SeqEncoder(
- embedding_dim=self.config["encoder_embedding_dim"],
- inp_vocab_size=self.config["len_input_symbols"],
- layers_units=self.config["encoder_units"],
- use_batch_norm=self.config["use_batch_norm"],
- )
-
- decoder = TacotronDecoder(
- self.config["len_target_symbols"],
- start_symbol_id=self.text_encoder.start_symbol_id,
- embedding_dim=self.config["decoder_embedding_dim"],
- encoder_dim=self.config["encoder_dim"],
- decoder_units=self.config["decoder_units"],
- decoder_layers=self.config["decoder_layers"],
- attention_type=self.config["attention_type"],
- attention_units=self.config["attention_units"],
- is_attention_accumulative=self.config["is_attention_accumulative"],
- use_prenet=self.config["use_decoder_prenet"],
- prenet_depth=self.config["decoder_prenet_depth"],
- teacher_forcing_probability=self.config["teacher_forcing_probability"],
- )
-
- model = Tacotron(encoder=encoder, decoder=decoder)
-
- return model
-
- def get_tacotron_based(self):
- encoder = TacotronEncoder(
- embedding_dim=self.config["encoder_embedding_dim"],
- inp_vocab_size=self.config["len_input_symbols"],
- prenet_sizes=self.config["prenet_sizes"],
- use_prenet=self.config["use_encoder_prenet"],
- cbhg_gru_units=self.config["cbhg_gru_units"],
- cbhg_filters=self.config["cbhg_filters"],
- cbhg_projections=self.config["cbhg_projections"],
- )
-
- decoder = TacotronDecoder(
- self.config["len_target_symbols"],
- start_symbol_id=self.text_encoder.start_symbol_id,
- embedding_dim=self.config["decoder_embedding_dim"],
- encoder_dim=self.config["encoder_dim"],
- decoder_units=self.config["decoder_units"],
- decoder_layers=self.config["decoder_layers"],
- attention_type=self.config["attention_type"],
- attention_units=self.config["attention_units"],
- is_attention_accumulative=self.config["is_attention_accumulative"],
- use_prenet=self.config["use_decoder_prenet"],
- prenet_depth=self.config["decoder_prenet_depth"],
- teacher_forcing_probability=self.config["teacher_forcing_probability"],
- )
-
- model = Tacotron(encoder=encoder, decoder=decoder)
-
- return model
-
- def get_text_encoder(self):
- """Getting the class of TextEncoder from config"""
- if self.config["text_cleaner"] not in [
- "basic_cleaners",
- "valid_arabic_cleaners",
- None,
- ]:
- raise Exception(f"cleaner is not known {self.config['text_cleaner']}")
-
- if self.config["text_encoder"] == "BasicArabicEncoder":
- text_encoder = BasicArabicEncoder(
- cleaner_fn=self.config["text_cleaner"], sp_model_path=self.sp_model_path
- )
- elif self.config["text_encoder"] == "ArabicEncoderWithStartSymbol":
- text_encoder = ArabicEncoderWithStartSymbol(
- cleaner_fn=self.config["text_cleaner"], sp_model_path=self.sp_model_path
- )
- else:
- raise Exception(
- f"the text encoder is not found {self.config['text_encoder']}"
- )
-
- return text_encoder
-
- def get_loss_type(self):
- try:
- loss_type = LossType[self.config["loss_type"]]
- except:
- raise Exception(f"The loss type is not correct {self.config['loss_type']}")
- return loss_type
-
-
-if __name__ == "__main__":
- config_path = "config/tacotron-base-config.yml"
- model_kind = "tacotron"
- config = ConfigManager(config_path=config_path, model_kind=model_kind)
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/atn/SemanticContext.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/atn/SemanticContext.py
deleted file mode 100644
index d4593195e0c4cdc80bd5760cd859aabb507deb60..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/atn/SemanticContext.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#
-# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-# Use of this file is governed by the BSD 3-clause license that
-# can be found in the LICENSE.txt file in the project root.
-#
-
-# A tree structure used to record the semantic context in which
-# an ATN configuration is valid. It's either a single predicate,
-# a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
-#
-#
-# I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
-# {@link SemanticContext} within the scope of this outer class.
-#
-from antlr4.Recognizer import Recognizer
-from antlr4.RuleContext import RuleContext
-from io import StringIO
-
-
-class SemanticContext(object):
- #
- # The default {@link SemanticContext}, which is semantically equivalent to
- # a predicate of the form {@code {true}?}.
- #
- NONE = None
-
- #
- # For context independent predicates, we evaluate them without a local
- # context (i.e., null context). That way, we can evaluate them without
- # having to create proper rule-specific context during prediction (as
- # opposed to the parser, which creates them naturally). In a practical
- # sense, this avoids a cast exception from RuleContext to myruleContext.
- #
- #
-# For context dependent predicates, we must pass in a local context so that
- # references such as $arg evaluate properly as _localctx.arg. We only
- # capture context dependent predicates in the context in which we begin
- # prediction, so we passed in the outer context here in case of context
- # dependent predicate evaluation.
- #
- def eval(self, parser:Recognizer , outerContext:RuleContext ):
- pass
-
- #
- # Evaluate the precedence predicates for the context and reduce the result.
- #
- # @param parser The parser instance.
- # @param outerContext The current parser context object.
- # @return The simplified semantic context after precedence predicates are
- # evaluated, which will be one of the following values.
- #
- #
-# {@link #NONE}: if the predicate simplifies to {@code true} after
- # precedence predicates are evaluated.
- #
-# {@code null}: if the predicate simplifies to {@code false} after
- # precedence predicates are evaluated.
- #
-# {@code this}: if the semantic context is not changed as a result of
- # precedence predicate evaluation.
- #
-# A non-{@code null} {@link SemanticContext}: the new simplified
- # semantic context after precedence predicates are evaluated.
- #
- #
- def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
- return self
-
-# need forward declaration
-AND = None
-
-def andContext(a:SemanticContext, b:SemanticContext):
- if a is None or a is SemanticContext.NONE:
- return b
- if b is None or b is SemanticContext.NONE:
- return a
- result = AND(a, b)
- if len(result.opnds) == 1:
- return result.opnds[0]
- else:
- return result
-
-# need forward declaration
-OR = None
-
-def orContext(a:SemanticContext, b:SemanticContext):
- if a is None:
- return b
- if b is None:
- return a
- if a is SemanticContext.NONE or b is SemanticContext.NONE:
- return SemanticContext.NONE
- result = OR(a, b)
- if len(result.opnds) == 1:
- return result.opnds[0]
- else:
- return result
-
-def filterPrecedencePredicates(collection:set):
- return [context for context in collection if isinstance(context, PrecedencePredicate)]
-
-
-class Predicate(SemanticContext):
-
- def __init__(self, ruleIndex:int=-1, predIndex:int=-1, isCtxDependent:bool=False):
- self.ruleIndex = ruleIndex
- self.predIndex = predIndex
- self.isCtxDependent = isCtxDependent # e.g., $i ref in pred
-
- def eval(self, parser:Recognizer , outerContext:RuleContext ):
- localctx = outerContext if self.isCtxDependent else None
- return parser.sempred(localctx, self.ruleIndex, self.predIndex)
-
- def __hash__(self):
- return hash((self.ruleIndex, self.predIndex, self.isCtxDependent))
-
- def __eq__(self, other):
- if self is other:
- return True
- elif not isinstance(other, Predicate):
- return False
- return self.ruleIndex == other.ruleIndex and \
- self.predIndex == other.predIndex and \
- self.isCtxDependent == other.isCtxDependent
-
- def __str__(self):
- return "{" + str(self.ruleIndex) + ":" + str(self.predIndex) + "}?"
-
-
-class PrecedencePredicate(SemanticContext):
-
- def __init__(self, precedence:int=0):
- self.precedence = precedence
-
- def eval(self, parser:Recognizer , outerContext:RuleContext ):
- return parser.precpred(outerContext, self.precedence)
-
- def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
- if parser.precpred(outerContext, self.precedence):
- return SemanticContext.NONE
- else:
- return None
-
- def __lt__(self, other):
- return self.precedence < other.precedence
-
- def __hash__(self):
- return 31
-
- def __eq__(self, other):
- if self is other:
- return True
- elif not isinstance(other, PrecedencePredicate):
- return False
- else:
- return self.precedence == other.precedence
-
-# A semantic context which is true whenever none of the contained contexts
-# is false.
-del AND
-class AND(SemanticContext):
-
- def __init__(self, a:SemanticContext, b:SemanticContext):
- operands = set()
- if isinstance( a, AND ):
- operands.update(a.opnds)
- else:
- operands.add(a)
- if isinstance( b, AND ):
- operands.update(b.opnds)
- else:
- operands.add(b)
-
- precedencePredicates = filterPrecedencePredicates(operands)
- if len(precedencePredicates)>0:
- # interested in the transition with the lowest precedence
- reduced = min(precedencePredicates)
- operands.add(reduced)
-
- self.opnds = list(operands)
-
- def __eq__(self, other):
- if self is other:
- return True
- elif not isinstance(other, AND):
- return False
- else:
- return self.opnds == other.opnds
-
- def __hash__(self):
- h = 0
- for o in self.opnds:
- h = hash((h, o))
- return hash((h, "AND"))
-
- #
- # {@inheritDoc}
- #
- #
- # The evaluation of predicates by this context is short-circuiting, but
- # unordered.
- #
- def eval(self, parser:Recognizer, outerContext:RuleContext):
- return all(opnd.eval(parser, outerContext) for opnd in self.opnds)
-
- def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
- differs = False
- operands = []
- for context in self.opnds:
- evaluated = context.evalPrecedence(parser, outerContext)
- differs |= evaluated is not context
- if evaluated is None:
- # The AND context is false if any element is false
- return None
- elif evaluated is not SemanticContext.NONE:
- # Reduce the result by skipping true elements
- operands.append(evaluated)
-
- if not differs:
- return self
-
- if len(operands)==0:
- # all elements were true, so the AND context is true
- return SemanticContext.NONE
-
- result = None
- for o in operands:
- result = o if result is None else andContext(result, o)
-
- return result
-
- def __str__(self):
- with StringIO() as buf:
- first = True
- for o in self.opnds:
- if not first:
- buf.write("&&")
- buf.write(str(o))
- first = False
- return buf.getvalue()
-
-#
-# A semantic context which is true whenever at least one of the contained
-# contexts is true.
-del OR
-class OR (SemanticContext):
-
- def __init__(self, a:SemanticContext, b:SemanticContext):
- operands = set()
- if isinstance( a, OR ):
- operands.update(a.opnds)
- else:
- operands.add(a)
- if isinstance( b, OR ):
- operands.update(b.opnds)
- else:
- operands.add(b)
-
- precedencePredicates = filterPrecedencePredicates(operands)
- if len(precedencePredicates)>0:
- # interested in the transition with the highest precedence
- s = sorted(precedencePredicates)
- reduced = s[-1]
- operands.add(reduced)
-
- self.opnds = list(operands)
-
- def __eq__(self, other):
- if self is other:
- return True
- elif not isinstance(other, OR):
- return False
- else:
- return self.opnds == other.opnds
-
- def __hash__(self):
- h = 0
- for o in self.opnds:
- h = hash((h, o))
- return hash((h, "OR"))
-
- #
- # The evaluation of predicates by this context is short-circuiting, but
- # unordered.
- #
- def eval(self, parser:Recognizer, outerContext:RuleContext):
- return any(opnd.eval(parser, outerContext) for opnd in self.opnds)
-
- def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
- differs = False
- operands = []
- for context in self.opnds:
- evaluated = context.evalPrecedence(parser, outerContext)
- differs |= evaluated is not context
- if evaluated is SemanticContext.NONE:
- # The OR context is true if any element is true
- return SemanticContext.NONE
- elif evaluated is not None:
- # Reduce the result by skipping false elements
- operands.append(evaluated)
-
- if not differs:
- return self
-
- if len(operands)==0:
- # all elements were false, so the OR context is false
- return None
-
- result = None
- for o in operands:
- result = o if result is None else orContext(result, o)
-
- return result
-
- def __str__(self):
- with StringIO() as buf:
- first = True
- for o in self.opnds:
- if not first:
- buf.write("||")
- buf.write(str(o))
- first = False
- return buf.getvalue()
-
-
-SemanticContext.NONE = Predicate()
\ No newline at end of file
diff --git a/spaces/awacke1/DockerGoFlanT5/README.md b/spaces/awacke1/DockerGoFlanT5/README.md
deleted file mode 100644
index 4b12d55c6c82427b1d3d7c6c10d8c9f16b79777d..0000000000000000000000000000000000000000
--- a/spaces/awacke1/DockerGoFlanT5/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ✍️Docker Go🚢
-emoji: 🚢Go
-colorFrom: blue
-colorTo: blue
-sdk: docker
-app_port: 8080
-app_file: app.py
-pinned: false
-license: mit
----
-
-
diff --git a/spaces/awacke1/HTML5-Dashboard/index.html b/spaces/awacke1/HTML5-Dashboard/index.html
deleted file mode 100644
index 13a6694233581fedde790ee24cf550e0188007cb..0000000000000000000000000000000000000000
--- a/spaces/awacke1/HTML5-Dashboard/index.html
+++ /dev/null
@@ -1,27 +0,0 @@
-
-
-
-
-
- My static Space
-
-
-
-
-
-MediaPipe Dashboard
-
-
-
-
-
-
diff --git a/spaces/awacke1/Sentiment-analysis-streamlit/README.md b/spaces/awacke1/Sentiment-analysis-streamlit/README.md
deleted file mode 100644
index d82b0087f38a74df738975e80933d291c3e7670c..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Sentiment-analysis-streamlit/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Sentiment Analysis Streamlit
-emoji: 🏃
-colorFrom: red
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/ctm/CTMWorker.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/ctm/CTMWorker.js
deleted file mode 100644
index 151f5aeb191e3006f5f26d489d199848d34eb854..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/ctm/CTMWorker.js
+++ /dev/null
@@ -1,19 +0,0 @@
-importScripts( "lzma.js", "ctm.js" );
-
-self.onmessage = function ( event ) {
-
- var files = [];
-
- for ( var i = 0; i < event.data.offsets.length; i ++ ) {
-
- var stream = new CTM.Stream( event.data.data );
- stream.offset = event.data.offsets[ i ];
-
- files[ i ] = new CTM.File( stream, [ event.data.data.buffer ] );
-
- }
-
- self.postMessage( files );
- self.close();
-
-};
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/audio/PositionalAudio.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/audio/PositionalAudio.d.ts
deleted file mode 100644
index 87d4fdd7921ef8f64ca45834d65ccf6e7bc88b67..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/audio/PositionalAudio.d.ts
+++ /dev/null
@@ -1,22 +0,0 @@
-import { AudioListener } from './AudioListener';
-import { Audio } from './Audio';
-
-export class PositionalAudio extends Audio {
- constructor(listener: AudioListener);
-
- panner: PannerNode;
-
- setRefDistance(value: number): this;
- getRefDistance(): number;
- setRolloffFactor(value: number): this;
- getRolloffFactor(): number;
- setDistanceModel(value: string): this;
- getDistanceModel(): string;
- setMaxDistance(value: number): this;
- getMaxDistance(): number;
- setDirectionalCone(
- coneInnerAngle: number,
- coneOuterAngle: number,
- coneOuterGain: number
- ): this;
-}
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/geometries/ExtrudeGeometry.js b/spaces/banana-projects/web3d/node_modules/three/src/geometries/ExtrudeGeometry.js
deleted file mode 100644
index 513f18fdf5eefcc1c5c582d43f2eaea0d07b6da3..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/geometries/ExtrudeGeometry.js
+++ /dev/null
@@ -1,832 +0,0 @@
-/**
- * @author zz85 / http://www.lab4games.net/zz85/blog
- *
- * Creates extruded geometry from a path shape.
- *
- * parameters = {
- *
- * curveSegments: <int>, // number of points on the curves
- * steps: <int>, // number of points for z-side extrusions / used for subdividing segments of extrude spline too
- * depth: <float>, // Depth to extrude the shape
- *
- * bevelEnabled: <bool>, // turn on bevel
- * bevelThickness: <float>, // how deep into the original shape bevel goes
- * bevelSize: <float>, // how far from shape outline is bevel
- * bevelSegments: <int>, // number of bevel layers
- *
- * extrudePath: <THREE.Curve> // curve to extrude shape along
- *
- * UVGenerator: ",
-
-## 确认
-
-130005 =
-
-## 源码:
-
-- https://huggingface.co/THUDM/chatglm-6b/blob/main/tokenization_chatglm.py#L32
-
-"""
-import os
-from transformers import AutoTokenizer
-
-os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
-# tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-tokenizer = AutoTokenizer.from_pretrained("tokenizer", trust_remote_code=True)
-
-
-def encode_text(text):
- """
- Check that the text can be encoded.
- """
- tokens = tokenizer.tokenize(text)
- token_id = tokenizer.encode(text=text, add_special_tokens=False)
- decoded_text = tokenizer.decode(token_id)
- print("tokens: ", tokens, ";\tid: ", token_id, ";\ttext: ", decoded_text)
-
-
-def test_space():
- # " " 编码后是空的
- for text in [" ", "\t", "你是谁", "你是\n谁", "你是 谁", "你是 谁", "'[Round 0]\n问:你是谁\n答:我是一个名为 ChatGLM-6B 的人工智能助手,是基于清华大学 KEG 实验室和智谱 AI 公司于 2023 年共同训练的语言模型开发的。我的任务是针对用户的问题和要求提供适当的答复和支持。\n[Round 1]\n问:你会干什么\n答:"]:
- encode_text(text)
-
-
-def test_case():
- for text in ["Good morning", "good morning", "good morning", "goog morningabc"]:
- encode_text(text)
-
-def export():
- with open("chatglm.vocab", "w", encoding="utf-8") as f_out:
- vocab_size = len(tokenizer.sp_tokenizer.text_tokenizer.proto.pieces)
- for i in range(vocab_size):
- f_out.write(tokenizer.sp_tokenizer.text_tokenizer.proto.pieces[i].piece + "\n")
-
-
-# export()
-
-
-def test_tokens():
- tokens = [43435]
- tokens = [ 53, 6945, 5, 8, 42, 4, 64286, 12, 74874,
- 4, 67342, 12, 74874, 130328, 130247, 130233, 130227, 35,
- 65806, 68241, 75890, 14132, 5388, 340, 11, 21, 222,
- 6, 76693, 66877, 63852, 6, 66430, 68747, 102501, 63823,
- 4, 52, 6945, 5, 9, 42, 4, 64286, 12,
- 65450, 83400, 64213, 66846, 4, 67342, 12, 130001, 130004,
- 74747, 83400, 66115, 90478, 70597, 63826, 68076, 6, 63873,
- 68684, 64113, 120922, 73129, 63823, 65056, 63829, 63948, 64124,
- 79727, 64447, 12, 4, 4, 9, 7, 5, 64716,
- 93067, 95119, 64560, 12, 66524, 63827, 70682, 63944, 89160,
- 63826, 71304, 6, 79553, 67155, 63826, 68668, 63843, 91351,
- 96846, 63823, 4, 4, 10, 7, 5, 95472, 74107,
- 66625, 64285, 12, 64442, 67201, 69609, 63824, 81548, 63824,
- 70870, 63826, 66800, 6, 94824, 63959, 65195, 65515, 63824,
- 64392, 69584, 63824, 81198, 63914, 63835, 63823, 4, 4,
- 13, 7, 5, 66544, 69656, 12, 66533, 63891, 63948,
- 66544, 69726, 6, 63906, 86089, 63824, 88419, 63824, 69765,
- 63853, 64369, 102753, 64736, 63823, 4, 4, 16, 7,
- 5, 65073, 63827, 72151, 64020, 67491, 66469, 63853, 68168,
- 12, 65289, 95128, 63826, 68819, 6, 118679, 66115, 64174,
- 66625, 63823, 4, 4, 15, 7, 5, 86790, 12,
- 70666, 89266, 63878, 66544, 69656, 6, 67623, 73129, 63823,
- 4, 4, 21, 7, 71210, 79856, 63912, 63831, 66625,
- 69204, 64659, 12, 66312, 63922, 64984, 67427, 63824, 63959,
- 65419, 63853, 64384, 63835, 63823, 4, 4, 63976, 106490,
- 65921, 64542, 73129, 6, 63852, 80917, 65207, 64678, 63853,
- 66625, 64427, 6, 89385, 64124, 79727, 64447, 63823, 130005]
- # print(tokenizer.decode(tokens))
- start_idx = 0 # token ids in chatglm start from 0
- # start_idx = 20000 # default vocab: the first 20000 ids are image tokens
- for i, token in enumerate(tokens):
- # print(i, token, tokenizer.decode([token - start_idx]))
- # print(tokenizer.sp_tokenizer.text_tokenizer.proto.pieces[token - start_idx].piece, end=" ")
- print(i, token, tokenizer.sp_tokenizer.text_tokenizer.proto.pieces[token - start_idx].piece)
-
-
-test_tokens()
-
-# tokenizer.sp_tokenizer.text_tokenizer.convert_token_to_id(x) + tokenizer.sp_tokenizer.num_image_tokens
-
-# test_case()
-# test_space()
-
-
-
-
-# s
-
-
diff --git a/spaces/exbert-project/exbert/client/src/ts/vis/AttentionHeadBox.ts b/spaces/exbert-project/exbert/client/src/ts/vis/AttentionHeadBox.ts
deleted file mode 100644
index 505a7502cddab294b8356500cd4c485f298e48ff..0000000000000000000000000000000000000000
--- a/spaces/exbert-project/exbert/client/src/ts/vis/AttentionHeadBox.ts
+++ /dev/null
@@ -1,229 +0,0 @@
-import * as d3 from "d3";
-import { VComponent } from "./VisComponent";
-import { SimpleEventHandler } from "../etc/SimpleEventHandler";
-import { D3Sel } from "../etc/Util";
-import { SVG } from "../etc/SVGplus"
-import * as tf from '@tensorflow/tfjs'
-import { Tensor3D } from "@tensorflow/tfjs";
-
-// The below two (interface and function) can become a class
-export type AttentionHeadBoxI = {
- rows: number[][],
- labels: number[],
- max: number,
-}
-
-/**
- * From an attention matrix selected by layer, show a summary of the attentions belonging to each head.
- *
- * @param headMat The matrix representing all the attentions by head (layer already selected)
- * @param headList The heads that are selected
- * @param side Is this the right or the left display?
- * @param tokenInd If not null, select just the information from a single token across heads
- * @returns Information needed to label the headbox
- */
-export function getAttentionInfo(headMat: number[][][], headList: number[], side: "right" | "left" = "left", token: null | {ind: number, side: "left" | "right"}=null): AttentionHeadBoxI {
- // Collect only from headlist, average each head, transpose to ease iteration
- if (headList.length == 0) {
- return {
- rows: [[]],
- labels: [],
- max: 0,
- }
- }
-
- let dim = null
- // Only change the attention graph opposite selected token
- if (token != null && (token.side != side)) {
- dim = token.side == "left" ? -2 : -1 // Assign to "from" direction if "left"
- }
-
- let axis: number = side == "left" ? 2 : 1;
-
- // average across the axis representing the attentions.
- let gatheredMat = tf.tensor3d(headMat)
- if (dim != null) {
- gatheredMat = gatheredMat.gather([token.ind], dim)
- }
- let newMat = gatheredMat.gather(headList, 0).mean([axis]).transpose();
-
- const rowInfo = newMat.arraySync();
-
- const out: AttentionHeadBoxI = {
- rows: rowInfo,
- labels: headList,
- max: newMat.max().arraySync(),
- }
-
- return out
-}
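-
-// Usage sketch (illustrative; `layerAttention` below is a hypothetical [head][from][to] attention tensor, not defined in this file):
-// const headBoxData: AttentionHeadBoxI = getAttentionInfo(layerAttention, [0, 1, 2], "left")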
-
-interface CurrentOptions {
- headHeight: number
- headWidth: number
- xPad: number
- yPad: number
- boxWidth: number
- totalWidth: number
- totalHeight: number
-};
-
-export class AttentionHeadBox extends VComponent<AttentionHeadBoxI>{
- css_name = '';
- rowCssName = 'att-head';
- boxCssName = 'att-rect';
-
- static events = {
- rowMouseOver: "AttentionHeadBox_RowMouseOver",
- rowMouseOut: "AttentionHeadBox_RowMouseOut",
- boxMouseOver: "AttentionHeadBox_BoxMouseOver",
- boxMouseOut: "AttentionHeadBox_BoxMouseOut",
- boxMouseMove: "AttentionHeadBox_BoxMouseMove",
- boxClick: "AttentionHeadBox_BoxClick",
- };
-
- _data: AttentionHeadBoxI;
-
- _current: Partial<CurrentOptions> = {}
-
- options = {
- boxDim: 26,
- yscale: 1, // Amount to scale boxheight to get individual heads
- xscale: 0.5, // Amount to scale boxwidth to get individual heads
- side: "left",
- maxWidth: 200, // Maximum width of SVG
- offset: 0, // Change to 1 if you desire the offset visualization for Autoregressive models
- };
-
- // D3 Components
- headRows: D3Sel;
- headCells: D3Sel;
- opacityScale: d3.ScaleLinear<number, number>;
-
- constructor(d3Parent: D3Sel, eventHandler?: SimpleEventHandler, options: {} = {}) {
- super(d3Parent, eventHandler);
- this.superInitSVG(options);
- this._init()
- }
-
- _init() {
- this.headRows = this.base.selectAll(`.${this.rowCssName}`)
- this.headCells = this.headRows.selectAll(`${this.boxCssName}`)
- this.opacityScale = d3.scaleLinear().range([0, 1]);
- }
-
- private updateCurrent(): Partial<CurrentOptions> {
- const op = this.options
- const cur = this._current
-
- const nHeads = this._data.rows[0].length
- const baseHeadWidth = op.boxDim * op.xscale
-
- // Scale headwidth according to maximum width
- const getHeadScale = (nH) => (Math.min(op.maxWidth / nH, baseHeadWidth) / baseHeadWidth) * op.xscale;
-
- cur.headHeight = op.boxDim * op.yscale;
- cur.headWidth = getHeadScale(nHeads) * op.boxDim;
- cur.xPad = cur.headWidth;
- cur.yPad = (op.boxDim - cur.headHeight) / 2;
-
- const getBoxWidth = (headWidth) => {
- const maxBwidth = 100;
- const bwidth = this._data.rows[0].length * cur.headWidth
- const scale = d3.scaleLinear
- if (bwidth > maxBwidth) {
- return
- }
-
- }
-
- cur.boxWidth = (this._data.rows[0].length * cur.headWidth);
- cur.totalWidth = (2 * cur.xPad) + cur.boxWidth;
- cur.totalHeight = (op.boxDim * (this._data.rows.length + op.offset));
-
- return this._current
- }
-
- private updateData() {
- const op = this.options;
- const self = this;
- const boxEvent = (i) => { return { ind: i, side: op.side, head: self._data.labels[i] } }
- const cur = this.updateCurrent()
-
- const getBaseX = () => (self.base.node()).getBoundingClientRect().left
- const getBaseY = () => (self.base.node()).getBoundingClientRect().top
-
- this.base.html('');
-
- this.parent
- .attr("width", cur.totalWidth)
- .attr("height", cur.totalHeight)
-
- this.headRows = this.base.selectAll(`.${self.rowCssName}`)
- .data(self._data.rows)
- .join("g")
- .attrs({
- class: (d, i) => `${self.rowCssName} ${self.rowCssName}-${i}`,
- transform: (d, i) => {
- return SVG.translate(
- {
- x: cur.xPad,
- y: (op.boxDim * (i + op.offset)) + cur.yPad,
- })
- },
- width: cur.boxWidth,
- height: cur.headHeight,
-
- })
- .on("mouseover", (d, i) => {
- self.eventHandler.trigger(AttentionHeadBox.events.rowMouseOver, { ind: i, side: op.side })
- })
- .on("mouseout", (d, i) => {
- self.eventHandler.trigger(AttentionHeadBox.events.rowMouseOut, { ind: i, side: op.side })
- })
-
- this.headCells = this.headRows
- .selectAll(`${this.boxCssName}`)
- .data(d => d)
- .join('rect')
- .attrs({
- x: (d, i) => i * cur.headWidth,
- y: 0,
- class: this.boxCssName,
- head: (d, i) => self._data.labels[i],
- width: cur.headWidth,
- height: cur.headHeight,
- opacity: (d: number) => this.opacityScale(d),
- fill: "blue"
- })
- .on("mouseover", (d, i) => {
- self.eventHandler.trigger(AttentionHeadBox.events.boxMouseOver, boxEvent(i))
- })
- .on("mouseout", (d, i) => {
- self.eventHandler.trigger(AttentionHeadBox.events.boxMouseOut, boxEvent(i))
- })
- .on("click", (d, i) => {
- self.eventHandler.trigger(AttentionHeadBox.events.boxClick, boxEvent(i))
- })
- .on("mousemove", function(d, i) {
- const op = self.options
- const mouse = d3.mouse(self.base.node())
-
- self.eventHandler.trigger(AttentionHeadBox.events.boxMouseMove, { ind: i, side: op.side, baseX: getBaseX(), baseY: getBaseY(), mouse: mouse })
-
- })
- .append("svg:title")
- .text((d, i) => "Head " + (self._data.labels[i] + 1))
- }
-
-
- _wrangle(data: AttentionHeadBoxI) {
- this._data = data;
- this.opacityScale = this.opacityScale.domain([0, data.max])
- return data;
- }
-
- _render(data: AttentionHeadBoxI) {
- this.updateData();
- }
-}
\ No newline at end of file
diff --git a/spaces/f2api/gpt-academic/docs/README_JP.md b/spaces/f2api/gpt-academic/docs/README_JP.md
deleted file mode 100644
index 1df2b0a9cf200ca5be348e9178dcf478558c7d0f..0000000000000000000000000000000000000000
--- a/spaces/f2api/gpt-academic/docs/README_JP.md
+++ /dev/null
@@ -1,329 +0,0 @@
-> **Note**
->
-> このReadmeファイルは、このプロジェクトのmarkdown翻訳プラグインによって自動的に生成されたもので、100%正確ではない可能性があります。
->
-> When installing dependencies, please strictly choose the versions specified in `requirements.txt`.
->
-> `pip install -r requirements.txt`
->
-
-# GPT 学术优化 (GPT Academic)
-
-**もしこのプロジェクトが好きなら、星をつけてください。もしあなたがより良いアカデミックショートカットまたは機能プラグインを思いついた場合、Issueをオープンするか pull request を送信してください。私たちはこのプロジェクト自体によって翻訳された[英語 |](README_EN.md)[日本語 |](README_JP.md)[한국어 |](https://github.com/mldljyh/ko_gpt_academic)[Русский |](README_RS.md)[Français](README_FR.md)のREADMEも用意しています。
-GPTを使った任意の言語にこのプロジェクトを翻訳するには、[`multi_language.py`](multi_language.py)を読んで実行してください。 (experimental)。
-
-> **注意**
->
-> 1. **赤色**で表示された関数プラグイン(ボタン)のみ、ファイルの読み取りをサポートしています。一部のプラグインは、プラグインエリアの**ドロップダウンメニュー**内にあります。また、私たちはどんな新しいプラグインのPRでも、**最優先**で歓迎し、処理します!
->
-> 2. このプロジェクトの各ファイルの機能は、自己解析の詳細説明書である[`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)で説明されています。バージョンが進化するにつれて、関連する関数プラグインをいつでもクリックし、GPTを呼び出してプロジェクトの自己解析レポートを再生成することができます。よくある問題は[`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)にまとめられています。[インストール方法](#installation)。
-
-> 3. このプロジェクトは、chatglmやRWKV、パンクなど、国内の大規模自然言語モデルを利用することをサポートし、試みることを奨励します。複数のAPIキーを共存することができ、設定ファイルに`API_KEY="openai-key1,openai-key2,api2d-key3"`のように記入することができます。`API_KEY`を一時的に変更する場合は、入力エリアに一時的な`API_KEY`を入力してEnterキーを押せば、それが有効になります。
-
-
-
- All buttons are dynamically generated by reading functional.py, and custom functions can be freely added to free the clipboard.
-
-
-
-
-
-- Polishing/Correction
-
-
-
-
-
-- If the output contains formulas, they are displayed in both TeX and rendering forms, making it easy to copy and read.
-
-
-
-
-
-- Don't feel like looking at the project code? Just ask chatgpt directly.
-
-
-
-
-
-
-- Mixed calls of multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
-
-
-
-
-
----
-
-# Installation
-
-## Installation-Method 1: Directly run (Windows, Linux or MacOS)
-
-1. Download the project.
-
-```sh
-git clone https://github.com/binary-husky/chatgpt_academic.git
-cd chatgpt_academic
-```
-
-2. Configure the API_KEY.
-
-Configure the API KEY and other settings in `config.py` and [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
-
-(P.S. When the program is running, it will first check if there is a private configuration file named `config_private.py`, and use the configuration in it to override the same name configuration in `config.py`. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py`, and transfer (copy) the configuration in `config.py` to `config_private.py`. `config_private.py` is not controlled by git and can make your privacy information more secure. P.S. The project also supports configuring most options through `environment variables`, and the writing format of environment variables refers to the `docker-compose` file. Reading priority: `environment variables` > `config_private.py` > `config.py`)
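-
-A minimal sketch of that recommendation (shell commands here assume Linux/macOS; on Windows, simply copy the file in your file manager):
-
-```sh
-# create the git-ignored private copy described above, then edit it;
-# values in config_private.py override the same-named options in config.py at startup
-cp config.py config_private.py
-```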
-
-3. Install dependencies.
-
-```sh
-# (Choose I: If familiar with Python)(Python version 3.9 or above, the newer the better) Note: Use the official pip source or Ali pip source. Temporary switching source method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Choose II: If not familiar with Python) Use anaconda, the steps are the same (https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11 # Create anaconda environment.
-conda activate gptac_venv # Activate the anaconda environment.
-python -m pip install -r requirements.txt # This step is the same as the pip installation step.
-```
-
-If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, click to expand.
-
-
-[Optional Steps] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (precondition: familiar with Python + used Pytorch + computer configuration). Strong enough):
-
-```sh
-# Optional step I: support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The version installed above is torch+cpu version, using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
-python -m pip install -r request_llm/requirements_chatglm.txt
-
-# Optional Step II: Support Fudan MOSS.
-python -m pip install -r request_llm/requirements_moss.txt
-git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, it must be in the project root.
-
-# 【Optional Step III】Ensure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
-
-
-
-
-
-
-4. Run.
-
-```sh
-python main.py
-```
-
-5. Testing Function Plugin
-```
-- Test function plugin template function (requires gpt to answer what happened today in history), you can use this function as a template to implement more complex functions
- Click "[Function Plugin Template Demo] Today in History"
-```
-
-## Installation-Methods 2: Using Docker
-
-1. Only ChatGPT (recommended for most people)
-
- ``` sh
-git clone https://github.com/binary-husky/chatgpt_academic.git # Download project
-cd chatgpt_academic # Enter path
-nano config.py # Edit config.py with any text editor ‑ configure "Proxy," "API_KEY," "WEB_PORT" (e.g., 50923) and more
-docker build -t gpt-academic . # installation
-
-#(Last step-Option 1) In a Linux environment, `--net=host` is more convenient and quick
-docker run --rm -it --net=host gpt-academic
-#(Last step-Option 2) In a macOS/windows environment, the -p option must be used to expose the container port (e.g., 50923) to the port on the host.
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)
-
-``` sh
-# Modify docker-compose.yml, delete plans 1 and 3, and retain plan 2. Modify the configuration of plan 2 in docker-compose.yml, and reference the comments for instructions.
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker)
-``` sh
-# Modify docker-compose.yml, delete plans 1 and 2, and retain plan 3. Modify the configuration of plan 3 in docker-compose.yml, and reference the comments for instructions.
-docker-compose up
-```
-
-
-## Installation-Method 3: Other Deployment Methods
-
-1. How to use proxy URL/Microsoft Azure API
-Configure API_URL_REDIRECT according to the instructions in `config.py`.
-
-2. Remote Cloud Server Deployment (requires cloud server knowledge and experience)
-Please visit [Deployment Wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-3. Using WSL2 (Windows Subsystem for Linux Subsystem)
-Please visit [Deployment Wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-4. How to run on a secondary URL (such as `http://localhost/subpath`)
-Please visit [FastAPI Running Instructions](docs/WithFastapi.md)
-
-5. Run with docker-compose
-Please read docker-compose.yml and follow the instructions provided therein.
----
-# Advanced Usage
-## Customize new convenience buttons/custom function plugins
-
-1. Custom new convenience buttons (academic shortcut keys)
-Open `core_functional.py` with any text editor, add the item as follows, and restart the program. (If the button has been added successfully and is visible, the prefix and suffix support hot modification without restarting the program.)
-example:
-```
-"Super English to Chinese Translation": {
- # Prefix, which will be added before your input. For example, used to describe your request, such as translation, code interpretation, polish, etc.
- "Prefix": "Please translate the following content into Chinese, and explain the proper nouns in the text in a markdown table one by one:\n\n",
-
- # Suffix, which will be added after your input. For example, in combination with the prefix, you can surround your input content with quotation marks.
- "Suffix": "",
-},
-```
-
-
-
-
-2. Custom function plugins
-
-Write powerful function plugins to perform any task you can and cannot think of.
-The difficulty of writing and debugging plugins in this project is low, and as long as you have a certain amount of python basic knowledge, you can follow the template provided by us to achieve your own plugin functions.
-For details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-
----
-# Latest Update
-## New feature dynamics.
-1. ダイアログの保存機能。関数プラグインエリアで '現在の会話を保存' を呼び出すと、現在のダイアログを読み取り可能で復元可能なHTMLファイルとして保存できます。さらに、関数プラグインエリア(ドロップダウンメニュー)で 'ダイアログの履歴保存ファイルを読み込む' を呼び出すことで、以前の会話を復元することができます。Tips:ファイルを指定せずに 'ダイアログの履歴保存ファイルを読み込む' をクリックすることで、過去のHTML保存ファイルのキャッシュを表示することができます。'すべてのローカルダイアログの履歴を削除' をクリックすることで、すべてのHTML保存ファイルのキャッシュを削除できます。
-
-
-
-## バージョン:
-- version 3.5(作業中):すべての関数プラグインを自然言語で呼び出すことができるようにする(高い優先度)。
-- version 3.4(作業中):chatglmのローカルモデルのマルチスレッドをサポートすることで、機能を改善する。
-- version 3.3:+Web情報の総合機能
-- version 3.2:関数プラグインでさらに多くのパラメータインターフェイスをサポートする(ダイアログの保存機能、任意の言語コードの解読+同時に任意のLLM組み合わせに関する問い合わせ)
-- version 3.1:複数のGPTモデルを同時に質問できるようになりました! api2dをサポートし、複数のAPIキーを均等に負荷分散することができます。
-- version 3.0:chatglmとその他の小型LLMのサポート。
-- version 2.6:プラグイン構造を再構築し、対話内容を高め、より多くのプラグインを追加しました。
-- version 2.5:自己アップデートし、長文書やトークンのオーバーフローの問題を解決しました。
-- version 2.4:(1)全文翻訳のPDF機能を追加しました。(2)入力エリアの位置切り替え機能を追加しました。(3)垂直レイアウトオプションを追加しました。(4)マルチスレッド関数プラグインを最適化しました。
-- version 2.3:マルチスレッド性能の向上。
-- version 2.2:関数プラグインのホットリロードをサポートする。
-- version 2.1:折りたたみ式レイアウト。
-- version 2.0:モジュール化された関数プラグインを導入。
-- version 1.0:基本機能
-
-gpt_academic開発者QQグループ-2:610599535
-
-- 既知の問題
- - 一部のブラウザ翻訳プラグインが、このソフトウェアのフロントエンドの実行を妨害する
- - gradioバージョンが高すぎるか低すぎると、多くの異常が引き起こされる
-
-## 参考学習
-
-```
-コードの中には、他の優れたプロジェクトの設計から参考にしたものがたくさん含まれています:
-
-# プロジェクト1:清華ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# プロジェクト2:清華JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# プロジェクト3:Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# プロジェクト4:ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# プロジェクト5:ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# その他:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
diff --git a/spaces/facebook/MusicGen/audiocraft/modules/lstm.py b/spaces/facebook/MusicGen/audiocraft/modules/lstm.py
deleted file mode 100644
index c0866175950c1ca4f6cca98649525e6481853bba..0000000000000000000000000000000000000000
--- a/spaces/facebook/MusicGen/audiocraft/modules/lstm.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from torch import nn
-
-
-class StreamableLSTM(nn.Module):
- """LSTM without worrying about the hidden state, nor the layout of the data.
- Expects input as convolutional layout.
- """
- def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True):
- super().__init__()
- self.skip = skip
- self.lstm = nn.LSTM(dimension, dimension, num_layers)
-
- def forward(self, x):
- x = x.permute(2, 0, 1)
- y, _ = self.lstm(x)
- if self.skip:
- y = y + x
- y = y.permute(1, 2, 0)
- return y
diff --git a/spaces/falterWliame/Face_Mask_Detection/Archisuite 17 Cracked.md b/spaces/falterWliame/Face_Mask_Detection/Archisuite 17 Cracked.md
deleted file mode 100644
index 8186308fe475c73d0cfc779b638f4a5e217f0b6a..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Archisuite 17 Cracked.md
+++ /dev/null
@@ -1,72 +0,0 @@
-
-
-Here is the list of changes and improvements from the last released version:
-
-* The exception handling function in the print/draw commands has been improved.
-
-* Class: RectangleZ is available as default option for Revit size
-
-* Class: Reference has been added as an example.
-
-* Class: BuildingGroup
-
-* Class: Layer is available with default option.
-
-* Class: Surface has been added with default option and the special curves added.
-
-* Class: UnknownElement is used to avoid problems during import.
-
-* New command: archicad_get_environment for reading the parameter values in the.ini file.
-
-* New function: archicad_set_environment for setting the parameter values in the.ini file.
-
-* New function: archicad_get_environment_changed for determining if the parameter values in the.ini file were changed.
-
-* New command: archicad_get_skin.
-
-* New command: archicad_save_file.
-
-* New function: archicad_add_files.
-
-* New function: archicad_get_files.
-
-* New function: archicad_is_file_changed.
-
-* New function: archicad_get_stamp.
-
-* New function: archicad_set_stamp.
-
-* New command: archicad_set_skin.
-
-* New command: archicad_set_skin_name.
-
-* New command: archicad_get_skin_name.
-
-* New function: archicad_get_skin_path.
-
-* New function: archicad_get_skin_path_locked.
-
-* New function: archicad_set_skin_path.
-
-* New function: archicad_set_skin_path_locked.
-
-* New function: archicad_set_skin_locked.
-
-* New function: archicad_remove_skin.
-
-* New function: archicad_get_skin.
-
-* New function: archicad_get_skin_changed.
-
-* New command: archicad_update_skin.
-
-* New function: archicad_is_skin_changed.
-
-* New function: archicad_update_skin_name.
-
-* New function: archicad_set_skin_name.
-
-* New function: archicad_set_skin_ 4fefd39f24
-
-
-
diff --git a/spaces/falterWliame/Face_Mask_Detection/Autodesk AutoCAD 2010 (Portable) Updated.md b/spaces/falterWliame/Face_Mask_Detection/Autodesk AutoCAD 2010 (Portable) Updated.md
deleted file mode 100644
index ac5094ff7cbca88ab6d5b53194c882399323a612..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Autodesk AutoCAD 2010 (Portable) Updated.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
-one of the new, built-in extensions is autocad 2010 (portable) product extensions. over twenty-two extension tools are now available in autocad 2010 (portable) to help you complete projects easier and faster.
-with the exciting release of autocad 2010, autodesk brings its award-winning, vector-based 2d cad solution to mobile device users. autocad 2010 for windows mobile now provides unprecedented ease of use and features that allow you to work on the go with a single device. autocad 10 customers can now enjoy over 25 new features, including dynamic block sizing, large blocks and an enhanced 3d printing tool.
-
-previous versions of autocad were all standalone products that were tied to a specific platform. autocad 2010, however, is designed to be a cross-platform, cross-tool solution, with files that can be used across all autocad products and most 3d modeling products.
-
-over the last year and a half, autocad has been transformed from a desktop cad product into a cross-platform, cross-product cad solution. autocad's ribbon interface can be adapted to any version of autocad, and new commands are added in each release of autocad. autodesk is also focusing on making autocad easier to use by making commands more intuitive and making the program more valuable to all users. users can now open autocad files from the web, creating one continuous cad experience regardless of the platform.
-
-
-when you create a new drawing, the drawing preview tab of the dock is updated with a preview of the drawing that you create. the newly added option to disable the lock button on the document toolbar also makes it easier to create drawings that are shared on a network.
-899543212b
-
-
\ No newline at end of file
diff --git a/spaces/falterWliame/Face_Mask_Detection/Moonu [PATCHED] Full Movie Hd 1080p Blu Ray 23.md b/spaces/falterWliame/Face_Mask_Detection/Moonu [PATCHED] Full Movie Hd 1080p Blu Ray 23.md
deleted file mode 100644
index 5874f1527946578d8a27e03dbe412c607fb055de..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Moonu [PATCHED] Full Movie Hd 1080p Blu Ray 23.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-# How to Download and Use Power System Toolbox for MATLAB
-
-Power System Toolbox (PST) is a MATLAB-based software package that provides tools for power system analysis and simulation. PST can help you perform tasks such as:
-
-* Modeling power generation equipment and grid integration
-* Performing harmonic analysis and power quality assessment
-* Optimizing power system operation and dispatch
-* Developing and testing control algorithms for power system equipment
-
-In this article, we will show you how to download and install PST, and how to use some of its features.
-
-## Downloading and Installing PST
-
-PST is available as a free download from the MATLAB File Exchange[^3^]. To download PST, you need to have a MathWorks account and a valid license for MATLAB. You can create a MathWorks account for free here.
-
-Once you have logged in to your MathWorks account, go to the PST page on the File Exchange[^3^] and click on the "Download" button. You will get a ZIP file containing the PST files and folders. Extract the ZIP file to a location of your choice on your computer.
-
-To install PST, you need to add its folder to the MATLAB path. You can do this by opening MATLAB and typing the following command in the Command Window:
-
-addpath('C:\PST')
-
-Replace C:\PST with the actual path where you extracted the PST files. You can also use the "Set Path" dialog box in MATLAB to add the folder to the path.
-
-To verify that PST is installed correctly, type pstversion in the Command Window. You should see something like this:
-PST Version 3.1.1 (2014-11-20)
-
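-
-If you also want the path change to persist across future MATLAB sessions, a short sketch (reusing the C:\PST location assumed above) is:
-
-addpath('C:\PST')   % make the PST functions visible on the MATLAB path
-savepath            % optional: save the updated search path for future sessions
-pstversion          % should print the installed PST version, as shown above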
-
-## Using PST for Power System Analysis and Simulation
-
-PST provides several functions and models for power system analysis and simulation. Some of the main functions are:
-
-* s_simu: This function performs time-domain simulation of power systems using a network solution method. It can handle both balanced and unbalanced systems, as well as different types of faults and disturbances.
-* s_stdy: This function performs steady-state analysis of power systems using a Newton-Raphson method. It can calculate power flows, bus voltages, line currents, losses, etc.
-* s_pf: This function performs optimal power flow (OPF) analysis of power systems using a linear programming method. It can optimize the generation dispatch, voltage control, transmission losses, etc.
-* s_simu_Batch: This function performs batch simulation of power systems using s_simu. It can run multiple simulations with different input data and settings.
-
-PST also provides several models of power system equipment, such as generators, transformers, loads, lines, breakers, etc. These models are stored in MATLAB data files (M-files) with the extension .m. You can edit these files to change the parameters or characteristics of the models.
-
-## Example: Simulating a Three-Bus System with PST
-
-To illustrate how to use PST for power system simulation, we will use an example of a three-bus system with two generators and one load. The system diagram is shown below:
-
-The data for this system are given in the table below:
-
-
-
-| Bus | Type | Voltage (pu) | Angle (deg) | P (MW) | Q (MVar) |
-| --- | ---- | ------------ | ----------- | ------ | -------- |
-| 1 | PV | 1.05 | 0 | -40 | -10 |
-| 2 | PQ | | | | |
- d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/felix-weiland/llama_index_demo/README.md b/spaces/felix-weiland/llama_index_demo/README.md
deleted file mode 100644
index 3a8029ebdb8698f8f1755e2421348d1dd203648a..0000000000000000000000000000000000000000
--- a/spaces/felix-weiland/llama_index_demo/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: Llama Index Demo
-emoji: 🏃
-colorFrom: gray
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-duplicated_from: jakobf/llama_index_demo
----
-
-# Interview your reviews
-
-You can ask questions in natural language. If you want to ask a follow-up question, remember to include the context in the new prompt, as the app doesn't include the context from the chat history yet.
-
-
-
-
-You can use the Reviews tab to search through all reviews.
-
-
-
-
-You can also ask questions and get responses in english. Just like with ChatGPT.
-
-
-
-
-
-
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Narsen Ninja Shinobi APK and Become a Legendary Ninja.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Narsen Ninja Shinobi APK and Become a Legendary Ninja.md
deleted file mode 100644
index 8f61f92c8494b2410822d14781bdad7b491341fe..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Narsen Ninja Shinobi APK and Become a Legendary Ninja.md
+++ /dev/null
@@ -1,72 +0,0 @@
-
-
Narsen Ninja Shinobi APK Download: A Naruto Fan's Dream Game
-
If you are a fan of Naruto, the popular anime and manga series about ninjas, you might have heard of narsen ninja shinobi, a fan-made game that features characters and settings from the Naruto universe. Narsen ninja shinobi is an action-packed game that lets you choose your favorite character from any anime like pirate, super warrior, hunter, and more, and discover a new dimension of ninja battles. In this article, we will tell you everything you need to know about narsen ninja shinobi, including its features, review, tips and tricks, and how to download it for free.
-
Features of Narsen Ninja Shinobi
-
Narsen ninja shinobi is a game that contains a very large variety of powerful heroes and warriors, each with their own unique skills, abilities, and weapons. You can play as one of the four main classes: attack, defense, ranged, or heal. You can also customize your character with different costumes, accessories, and skills. The game has several modes to choose from, such as one-to-one, team fight, survival, mission, and story. You can also play online with other players or offline with bots.
The game has 7+ powerful maps to explore, each with different themes and challenges. You can also experience different arcs from the Naruto series, such as the Chunin Exams, the Akatsuki Invasion, the Great Ninja War, and more. The game also has a bonus stage where you can use your shurikens to take down incoming enemies in a first-person perspective. The game has stunning graphics, smooth animations, and epic sound effects that make you feel like you are in the Naruto world.
-
Review of Narsen Ninja Shinobi
-
Narsen ninja shinobi is a game that has received a lot of positive feedback from Naruto fans who have played it. The game is praised for its faithful adaptation of the Naruto series, its variety of characters and skills, its fun and addictive gameplay, and its easy-to-use controls. The game is also updated regularly with new content and features to keep the players engaged.
-
However, the game also has some drawbacks that might affect your enjoyment. The game is not an official product of Naruto or Sega, so it might have some bugs, glitches, or errors that need to be fixed. The game also requires a lot of storage space on your device, so you might need to delete some files or apps to make room for it. The game also might not be compatible with some devices or operating systems, so you might need to check if your device meets the minimum requirements before downloading it.
-
Overall, narsen ninja shinobi is a game that is worth trying if you are a fan of Naruto or anime in general. The game offers a lot of fun and excitement for anyone who loves ninjas and battles. The game is also free to download and play, so you don't have to worry about spending any money on it.
-
Tips and Tricks for Narsen Ninja Shinobi
-
If you want to play narsen ninja shinobi effectively and enjoy it more, here are some tips and tricks that might help you:
-
-
Choose your character wisely. Each character has their own strengths and weaknesses, so you need to pick one that suits your playstyle and preference. You can also switch characters during the game if you want to try different ones.
-
Use your skills strategically. Each character has two ninjutsu skills that can be activated by pressing certain buttons. These skills can deal damage, heal allies, stun enemies, or boost your stats. However, they also have cooldowns and cost chakra (energy), so you need to use them wisely.
-
Master your movement. The game allows you to move freely on the map using different actions such as running, jumping, sliding, wall-running, grappling, and teleporting. You need to master your movement to dodge attacks, chase enemies, or escape danger.
-
Collect items and power-ups. The game has various items and power-ups that can be found on the map or dropped by enemies. These items can restore your health, chakra, or stamina, or give you temporary boosts such as speed, damage, or defense. You can also use items to summon allies or unleash ultimate attacks.
-
Work with your team. The game is more fun and rewarding when you play with your friends or other players online. You can communicate with your team using the chat or voice feature, and coordinate your strategies and tactics. You can also help each other by healing, supporting, or reviving each other.
-
-
Conclusion
-
Narsen ninja shinobi is a game that is a must-have for any Naruto fan who wants to experience the thrill and excitement of ninja battles. The game has a lot of features, modes, characters, and maps that will keep you entertained for hours. The game also has amazing graphics, sound effects, and animations that will make you feel like you are in the Naruto world. The game is also easy to download and play, and it is free of charge.
-
narsen ultimate shinobi war apk by kz
-narsen mod ultimate shinobi war apk
-narsen ninja storm 4 apk download
-narsen shinobi heroes apk mod
-narsen ninja blazing apk latest version
-narsen shinobi striker apk offline
-narsen ninja impact apk free download
-narsen shinobi collection apk english
-narsen ninja legends apk unlimited money
-narsen shinobi rebirth apk update
-narsen ninja voltage apk mod menu
-narsen shinobi generations apk full
-narsen ninja heroes apk original
-narsen shinobi alliance apk hack
-narsen ninja battle apk online
-narsen shinobi awakening apk data
-narsen ninja storm 3 apk obb
-narsen shinobi evolution apk android
-narsen ninja senki apk modded
-narsen shinobi master apk premium
-narsen ninja saga apk offline mode
-narsen shinobi world apk new version
-narsen ninja rise apk unlimited coins
-narsen shinobi legend apk cheat
-narsen ninja revolution apk no root
-narsen shinobi clash apk unlocked
-narsen ninja adventure apk pro
-narsen shinobi chronicles apk cracked
-narsen ninja storm 2 apk file
-narsen shinobi fighter apk download link
-
If you want to download narsen ninja shinobi for free, you can follow these simple steps:
-
-
Go to the official website of narsen ninja shinobi (https://narsenninjashinobi.com/).
-
Click on the download button and choose your device (Android or iOS).
-
Wait for the download to finish and install the game on your device.
-
Open the game and enjoy!
-
-
FAQs
-
Q: Is narsen ninja shinobi safe to download?
-
A: Yes, narsen ninja shinobi is safe to download and play. The game does not contain any viruses, malware, or spyware that might harm your device or data. However, you should always download the game from the official website or a trusted source to avoid any problems.
-
Q: Is narsen ninja shinobi an online or offline game?
-
A: Narsen ninja shinobi is both an online and offline game. You can play the game online with other players or offline with bots. However, some features and modes might require an internet connection to work properly.
-
Q: How can I update narsen ninja shinobi?
-
A: Narsen ninja shinobi is updated regularly with new content and features. You can update the game by going to the official website or the app store and downloading the latest version. You can also turn on the auto-update feature on your device to get the updates automatically.
-
Q: How can I contact the developers of narsen ninja shinobi?
-
A: If you have any questions, feedback, suggestions, or issues regarding narsen ninja shinobi, you can contact the developers by sending them an email at narsenninjashinobi@gmail.com. You can also follow them on their social media accounts (Facebook, Twitter, Instagram) for more updates and news.
-
Q: Can I play narsen ninja shinobi on PC?
-
A: Narsen ninja shinobi is currently only available for Android and iOS devices. However, you can play the game on PC using an emulator such as BlueStacks or NoxPlayer. These emulators allow you to run Android apps on your PC.
-
- """
- soup = BeautifulSoup(body, "html.parser")
- links = extract_hyperlinks(soup, "http://example.com")
- self.assertEqual(
- links,
- [("Google", "https://google.com"), ("Foo", "http://example.com/foo.html")],
- )
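The assertion above exercises an `extract_hyperlinks(soup, base_url)` helper that is not shown in this hunk. A minimal sketch of such a helper, assuming BeautifulSoup and `urllib.parse.urljoin` (the sample HTML below is made up for illustration), that would satisfy the expected output:

```py
from urllib.parse import urljoin

from bs4 import BeautifulSoup


def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
    """Return (link text, absolute URL) pairs for every anchor in the parsed page."""
    return [
        (link.text, urljoin(base_url, link["href"]))
        for link in soup.find_all("a", href=True)
    ]


# Illustrative HTML only; the original test body is not shown in this hunk.
body = '<a href="https://google.com">Google</a> <a href="foo.html">Foo</a>'
soup = BeautifulSoup(body, "html.parser")
print(extract_hyperlinks(soup, "http://example.com"))
# [('Google', 'https://google.com'), ('Foo', 'http://example.com/foo.html')]
```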
diff --git a/spaces/flowers-team/SocialAISchool/draw_trees.sh b/spaces/flowers-team/SocialAISchool/draw_trees.sh
deleted file mode 100644
index 0c0c0e75f9e364954b5bfd257125d18b56cc7e5d..0000000000000000000000000000000000000000
--- a/spaces/flowers-team/SocialAISchool/draw_trees.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-## Pointing
-#python draw_tree.py cs_trees/pointing_tree_train SocialAI-EPointingHeldoutDoorsTrainInformationSeekingParamEnv-v1
-#python draw_tree.py cs_trees/pointing_tree_test SocialAI-EPointingDoorsTestInformationSeekingParamEnv-v1
-#
-## Role Reversal
-#python draw_tree.py cs_trees/rr_tree_B_single SocialAI-MarblePassBCollaborationParamEnv-v1
-#python draw_tree.py cs_trees/rr_tree_asoc_single SocialAI-AsocialMarbleCollaborationParamEnv-v1
-#python draw_tree.py cs_trees/rr_tree_B_group SocialAI-RoleReversalGroupExperimentalCollaborationParamEnv-v1
-#python draw_tree.py cs_trees/rr_tree_asoc_group SocialAI-RoleReversalGroupControlCollaborationParamEnv-v1
-#python draw_tree.py cs_trees/rr_tree_A SocialAI-MarblePassACollaborationParamEnv-v1
-#
-## Scaffolding
-#python draw_tree.py cs_trees/scaf_tree_test SocialAI-AELangFeedbackTrainFormatsCSParamEnv-v1
-#python draw_tree.py cs_trees/scaf_tree_4 SocialAI-AELangFeedbackTrainScaffoldingCSParamEnv-v1 -acl-type intro_seq
-#python draw_tree.py cs_trees/scaf_tree_8 SocialAI-AELangFeedbackTrainScaffoldingCSParamEnv-v1 --acl-type intro_seq_seq
-
-# LLMs
-#python draw_tree.py cs_trees/llms_tree_asoc_apple SocialAI-AsocialBoxInformationSeekingParamEnv-v1
-#python draw_tree.py cs_trees/llms_tree_color_box SocialAI-ColorBoxesLLMCSParamEnv-v1
diff --git a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/blocks_inputs/run.py b/spaces/freddyaboulton/3.1.4.9-all-demos/demos/blocks_inputs/run.py
deleted file mode 100644
index 502b1ed9145c37c841f5d49b7eecc18877eb45ca..0000000000000000000000000000000000000000
--- a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/blocks_inputs/run.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import gradio as gr
-import os
-
-def combine(a, b):
- return a + " " + b
-
-def mirror(x):
- return x
-
-with gr.Blocks() as demo:
-
- txt = gr.Textbox(label="Input", lines=2)
- txt_2 = gr.Textbox(label="Input 2")
- txt_3 = gr.Textbox(value="", label="Output")
- btn = gr.Button(value="Submit")
- btn.click(combine, inputs=[txt, txt_2], outputs=[txt_3])
-
- with gr.Row():
- im = gr.Image()
- im_2 = gr.Image()
-
- btn = gr.Button(value="Mirror Image")
- btn.click(mirror, inputs=[im], outputs=[im_2])
-
- gr.Markdown("## Text Examples")
- gr.Examples([["hi", "Adam"], ["hello", "Eve"]], [txt, txt_2], txt_3, combine, cache_examples=True)
- gr.Markdown("## Image Examples")
- gr.Examples(
- examples=[os.path.join(os.path.dirname(__file__), "lion.jpg")],
- inputs=im,
- outputs=im_2,
- fn=mirror,
- cache_examples=True)
-
-if __name__ == "__main__":
- demo.launch()
diff --git a/spaces/freddyaboulton/gradio_folium/src/backend/gradio_folium/templates/example/index.js b/spaces/freddyaboulton/gradio_folium/src/backend/gradio_folium/templates/example/index.js
deleted file mode 100644
index 66c53e723c2e89d6205a5648d7b96faae1ae5543..0000000000000000000000000000000000000000
--- a/spaces/freddyaboulton/gradio_folium/src/backend/gradio_folium/templates/example/index.js
+++ /dev/null
@@ -1,88 +0,0 @@
-const {
- SvelteComponent: f,
- append: u,
- attr: d,
- detach: g,
- element: o,
- init: v,
- insert: r,
- noop: c,
- safe_not_equal: y,
- set_data: m,
- text: b,
- toggle_class: i
-} = window.__gradio__svelte__internal;
-function w(a) {
- let e, n;
- return {
- c() {
- e = o("div"), n = b(
- /*value*/
- a[0]
- ), d(e, "class", "svelte-1gecy8w"), i(
- e,
- "table",
- /*type*/
- a[1] === "table"
- ), i(
- e,
- "gallery",
- /*type*/
- a[1] === "gallery"
- ), i(
- e,
- "selected",
- /*selected*/
- a[2]
- );
- },
- m(t, l) {
- r(t, e, l), u(e, n);
- },
- p(t, [l]) {
- l & /*value*/
- 1 && m(
- n,
- /*value*/
- t[0]
- ), l & /*type*/
- 2 && i(
- e,
- "table",
- /*type*/
- t[1] === "table"
- ), l & /*type*/
- 2 && i(
- e,
- "gallery",
- /*type*/
- t[1] === "gallery"
- ), l & /*selected*/
- 4 && i(
- e,
- "selected",
- /*selected*/
- t[2]
- );
- },
- i: c,
- o: c,
- d(t) {
- t && g(e);
- }
- };
-}
-function h(a, e, n) {
- let { value: t } = e, { type: l } = e, { selected: _ = !1 } = e;
- return a.$$set = (s) => {
- "value" in s && n(0, t = s.value), "type" in s && n(1, l = s.type), "selected" in s && n(2, _ = s.selected);
- }, [t, l, _];
-}
-class E extends f {
- constructor(e) {
- super(), v(this, e, h, w, y, { value: 0, type: 1, selected: 2 });
- }
-}
-export {
- E as default
-};
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/backbones/unet.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/backbones/unet.py
deleted file mode 100644
index 82caa16a94c195c192a2a920fb7bc7e60f0f3ce3..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/backbones/unet.py
+++ /dev/null
@@ -1,429 +0,0 @@
-import torch.nn as nn
-import torch.utils.checkpoint as cp
-from annotator.uniformer.mmcv.cnn import (UPSAMPLE_LAYERS, ConvModule, build_activation_layer,
- build_norm_layer, constant_init, kaiming_init)
-from annotator.uniformer.mmcv.runner import load_checkpoint
-from annotator.uniformer.mmcv.utils.parrots_wrapper import _BatchNorm
-
-from annotator.uniformer.mmseg.utils import get_root_logger
-from ..builder import BACKBONES
-from ..utils import UpConvBlock
-
-
-class BasicConvBlock(nn.Module):
- """Basic convolutional block for UNet.
-
- This module consists of several plain convolutional layers.
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- num_convs (int): Number of convolutional layers. Default: 2.
- stride (int): Whether use stride convolution to downsample
- the input feature map. If stride=2, it only uses stride convolution
- in the first convolutional layer to downsample the input feature
- map. Options are 1 or 2. Default: 1.
- dilation (int): Whether use dilated convolution to expand the
- receptive field. Set dilation rate of each convolutional layer and
- the dilation rate of the first convolutional layer is always 1.
- Default: 1.
- with_cp (bool): Use checkpoint or not. Using checkpoint will save some
- memory while slowing down the training speed. Default: False.
- conv_cfg (dict | None): Config dict for convolution layer.
- Default: None.
- norm_cfg (dict | None): Config dict for normalization layer.
- Default: dict(type='BN').
- act_cfg (dict | None): Config dict for activation layer in ConvModule.
- Default: dict(type='ReLU').
- dcn (bool): Use deformable convolution in convolutional layer or not.
- Default: None.
- plugins (dict): plugins for convolutional layers. Default: None.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- num_convs=2,
- stride=1,
- dilation=1,
- with_cp=False,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU'),
- dcn=None,
- plugins=None):
- super(BasicConvBlock, self).__init__()
- assert dcn is None, 'Not implemented yet.'
- assert plugins is None, 'Not implemented yet.'
-
- self.with_cp = with_cp
- convs = []
- for i in range(num_convs):
- convs.append(
- ConvModule(
- in_channels=in_channels if i == 0 else out_channels,
- out_channels=out_channels,
- kernel_size=3,
- stride=stride if i == 0 else 1,
- dilation=1 if i == 0 else dilation,
- padding=1 if i == 0 else dilation,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg))
-
- self.convs = nn.Sequential(*convs)
-
- def forward(self, x):
- """Forward function."""
-
- if self.with_cp and x.requires_grad:
- out = cp.checkpoint(self.convs, x)
- else:
- out = self.convs(x)
- return out
-
-
-@UPSAMPLE_LAYERS.register_module()
-class DeconvModule(nn.Module):
- """Deconvolution upsample module in decoder for UNet (2X upsample).
-
- This module uses deconvolution to upsample feature map in the decoder
- of UNet.
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- with_cp (bool): Use checkpoint or not. Using checkpoint will save some
- memory while slowing down the training speed. Default: False.
- norm_cfg (dict | None): Config dict for normalization layer.
- Default: dict(type='BN').
- act_cfg (dict | None): Config dict for activation layer in ConvModule.
- Default: dict(type='ReLU').
- kernel_size (int): Kernel size of the convolutional layer. Default: 4.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- with_cp=False,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU'),
- *,
- kernel_size=4,
- scale_factor=2):
- super(DeconvModule, self).__init__()
-
- assert (kernel_size - scale_factor >= 0) and\
- (kernel_size - scale_factor) % 2 == 0,\
- f'kernel_size should be greater than or equal to scale_factor '\
- f'and (kernel_size - scale_factor) should be even numbers, '\
- f'while the kernel size is {kernel_size} and scale_factor is '\
- f'{scale_factor}.'
-
- stride = scale_factor
- padding = (kernel_size - scale_factor) // 2
- self.with_cp = with_cp
- deconv = nn.ConvTranspose2d(
- in_channels,
- out_channels,
- kernel_size=kernel_size,
- stride=stride,
- padding=padding)
-
- norm_name, norm = build_norm_layer(norm_cfg, out_channels)
- activate = build_activation_layer(act_cfg)
- self.deconv_upsamping = nn.Sequential(deconv, norm, activate)
-
- def forward(self, x):
- """Forward function."""
-
- if self.with_cp and x.requires_grad:
- out = cp.checkpoint(self.deconv_upsamping, x)
- else:
- out = self.deconv_upsamping(x)
- return out
-
-
-@UPSAMPLE_LAYERS.register_module()
-class InterpConv(nn.Module):
- """Interpolation upsample module in decoder for UNet.
-
- This module uses interpolation to upsample feature map in the decoder
- of UNet. It consists of one interpolation upsample layer and one
- convolutional layer. It can be one interpolation upsample layer followed
- by one convolutional layer (conv_first=False) or one convolutional layer
- followed by one interpolation upsample layer (conv_first=True).
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- with_cp (bool): Use checkpoint or not. Using checkpoint will save some
- memory while slowing down the training speed. Default: False.
- norm_cfg (dict | None): Config dict for normalization layer.
- Default: dict(type='BN').
- act_cfg (dict | None): Config dict for activation layer in ConvModule.
- Default: dict(type='ReLU').
- conv_cfg (dict | None): Config dict for convolution layer.
- Default: None.
- conv_first (bool): Whether convolutional layer or interpolation
- upsample layer first. Default: False. It means interpolation
- upsample layer followed by one convolutional layer.
- kernel_size (int): Kernel size of the convolutional layer. Default: 1.
- stride (int): Stride of the convolutional layer. Default: 1.
- padding (int): Padding of the convolutional layer. Default: 1.
- upsample_cfg (dict): Interpolation config of the upsample layer.
- Default: dict(
- scale_factor=2, mode='bilinear', align_corners=False).
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- with_cp=False,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU'),
- *,
- conv_cfg=None,
- conv_first=False,
- kernel_size=1,
- stride=1,
- padding=0,
- upsample_cfg=dict(
- scale_factor=2, mode='bilinear', align_corners=False)):
- super(InterpConv, self).__init__()
-
- self.with_cp = with_cp
- conv = ConvModule(
- in_channels,
- out_channels,
- kernel_size=kernel_size,
- stride=stride,
- padding=padding,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg)
- upsample = nn.Upsample(**upsample_cfg)
- if conv_first:
- self.interp_upsample = nn.Sequential(conv, upsample)
- else:
- self.interp_upsample = nn.Sequential(upsample, conv)
-
- def forward(self, x):
- """Forward function."""
-
- if self.with_cp and x.requires_grad:
- out = cp.checkpoint(self.interp_upsample, x)
- else:
- out = self.interp_upsample(x)
- return out
-
-
-@BACKBONES.register_module()
-class UNet(nn.Module):
- """UNet backbone.
- U-Net: Convolutional Networks for Biomedical Image Segmentation.
- https://arxiv.org/pdf/1505.04597.pdf
-
- Args:
- in_channels (int): Number of input image channels. Default" 3.
- base_channels (int): Number of base channels of each stage.
- The output channels of the first stage. Default: 64.
- num_stages (int): Number of stages in encoder, normally 5. Default: 5.
- strides (Sequence[int 1 | 2]): Strides of each stage in encoder.
- len(strides) is equal to num_stages. Normally the stride of the
- first stage in encoder is 1. If strides[i]=2, it uses stride
- convolution to downsample in the correspondence encoder stage.
- Default: (1, 1, 1, 1, 1).
- enc_num_convs (Sequence[int]): Number of convolutional layers in the
- convolution block of the correspondence encoder stage.
- Default: (2, 2, 2, 2, 2).
- dec_num_convs (Sequence[int]): Number of convolutional layers in the
- convolution block of the correspondence decoder stage.
- Default: (2, 2, 2, 2).
- downsamples (Sequence[int]): Whether use MaxPool to downsample the
- feature map after the first stage of encoder
- (stages: [1, num_stages)). If the correspondence encoder stage use
- stride convolution (strides[i]=2), it will never use MaxPool to
- downsample, even downsamples[i-1]=True.
- Default: (True, True, True, True).
- enc_dilations (Sequence[int]): Dilation rate of each stage in encoder.
- Default: (1, 1, 1, 1, 1).
- dec_dilations (Sequence[int]): Dilation rate of each stage in decoder.
- Default: (1, 1, 1, 1).
- with_cp (bool): Use checkpoint or not. Using checkpoint will save some
- memory while slowing down the training speed. Default: False.
- conv_cfg (dict | None): Config dict for convolution layer.
- Default: None.
- norm_cfg (dict | None): Config dict for normalization layer.
- Default: dict(type='BN').
- act_cfg (dict | None): Config dict for activation layer in ConvModule.
- Default: dict(type='ReLU').
- upsample_cfg (dict): The upsample config of the upsample module in
- decoder. Default: dict(type='InterpConv').
- norm_eval (bool): Whether to set norm layers to eval mode, namely,
- freeze running stats (mean and var). Note: Effect on Batch Norm
- and its variants only. Default: False.
- dcn (bool): Use deformable convolution in convolutional layer or not.
- Default: None.
- plugins (dict): plugins for convolutional layers. Default: None.
-
- Notice:
- The input image size should be divisible by the whole downsample rate
- of the encoder. More detail of the whole downsample rate can be found
- in UNet._check_input_divisible.
-
- """
-
- def __init__(self,
- in_channels=3,
- base_channels=64,
- num_stages=5,
- strides=(1, 1, 1, 1, 1),
- enc_num_convs=(2, 2, 2, 2, 2),
- dec_num_convs=(2, 2, 2, 2),
- downsamples=(True, True, True, True),
- enc_dilations=(1, 1, 1, 1, 1),
- dec_dilations=(1, 1, 1, 1),
- with_cp=False,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU'),
- upsample_cfg=dict(type='InterpConv'),
- norm_eval=False,
- dcn=None,
- plugins=None):
- super(UNet, self).__init__()
- assert dcn is None, 'Not implemented yet.'
- assert plugins is None, 'Not implemented yet.'
- assert len(strides) == num_stages, \
- 'The length of strides should be equal to num_stages, '\
- f'while the strides is {strides}, the length of '\
- f'strides is {len(strides)}, and the num_stages is '\
- f'{num_stages}.'
- assert len(enc_num_convs) == num_stages, \
- 'The length of enc_num_convs should be equal to num_stages, '\
- f'while the enc_num_convs is {enc_num_convs}, the length of '\
- f'enc_num_convs is {len(enc_num_convs)}, and the num_stages is '\
- f'{num_stages}.'
- assert len(dec_num_convs) == (num_stages-1), \
- 'The length of dec_num_convs should be equal to (num_stages-1), '\
- f'while the dec_num_convs is {dec_num_convs}, the length of '\
- f'dec_num_convs is {len(dec_num_convs)}, and the num_stages is '\
- f'{num_stages}.'
- assert len(downsamples) == (num_stages-1), \
- 'The length of downsamples should be equal to (num_stages-1), '\
- f'while the downsamples is {downsamples}, the length of '\
- f'downsamples is {len(downsamples)}, and the num_stages is '\
- f'{num_stages}.'
- assert len(enc_dilations) == num_stages, \
- 'The length of enc_dilations should be equal to num_stages, '\
- f'while the enc_dilations is {enc_dilations}, the length of '\
- f'enc_dilations is {len(enc_dilations)}, and the num_stages is '\
- f'{num_stages}.'
- assert len(dec_dilations) == (num_stages-1), \
- 'The length of dec_dilations should be equal to (num_stages-1), '\
- f'while the dec_dilations is {dec_dilations}, the length of '\
- f'dec_dilations is {len(dec_dilations)}, and the num_stages is '\
- f'{num_stages}.'
- self.num_stages = num_stages
- self.strides = strides
- self.downsamples = downsamples
- self.norm_eval = norm_eval
- self.base_channels = base_channels
-
- self.encoder = nn.ModuleList()
- self.decoder = nn.ModuleList()
-
- for i in range(num_stages):
- enc_conv_block = []
- if i != 0:
- if strides[i] == 1 and downsamples[i - 1]:
- enc_conv_block.append(nn.MaxPool2d(kernel_size=2))
- upsample = (strides[i] != 1 or downsamples[i - 1])
- self.decoder.append(
- UpConvBlock(
- conv_block=BasicConvBlock,
- in_channels=base_channels * 2**i,
- skip_channels=base_channels * 2**(i - 1),
- out_channels=base_channels * 2**(i - 1),
- num_convs=dec_num_convs[i - 1],
- stride=1,
- dilation=dec_dilations[i - 1],
- with_cp=with_cp,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg,
- upsample_cfg=upsample_cfg if upsample else None,
- dcn=None,
- plugins=None))
-
- enc_conv_block.append(
- BasicConvBlock(
- in_channels=in_channels,
- out_channels=base_channels * 2**i,
- num_convs=enc_num_convs[i],
- stride=strides[i],
- dilation=enc_dilations[i],
- with_cp=with_cp,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg,
- dcn=None,
- plugins=None))
- self.encoder.append((nn.Sequential(*enc_conv_block)))
- in_channels = base_channels * 2**i
-
- def forward(self, x):
- self._check_input_divisible(x)
- enc_outs = []
- for enc in self.encoder:
- x = enc(x)
- enc_outs.append(x)
- dec_outs = [x]
- for i in reversed(range(len(self.decoder))):
- x = self.decoder[i](enc_outs[i], x)
- dec_outs.append(x)
-
- return dec_outs
-
- def train(self, mode=True):
- """Convert the model into training mode while keep normalization layer
- freezed."""
- super(UNet, self).train(mode)
- if mode and self.norm_eval:
- for m in self.modules():
- # trick: eval have effect on BatchNorm only
- if isinstance(m, _BatchNorm):
- m.eval()
-
- def _check_input_divisible(self, x):
- h, w = x.shape[-2:]
- whole_downsample_rate = 1
- for i in range(1, self.num_stages):
- if self.strides[i] == 2 or self.downsamples[i - 1]:
- whole_downsample_rate *= 2
- assert (h % whole_downsample_rate == 0) \
- and (w % whole_downsample_rate == 0),\
- f'The input image size {(h, w)} should be divisible by the whole '\
- f'downsample rate {whole_downsample_rate}, when num_stages is '\
- f'{self.num_stages}, strides is {self.strides}, and downsamples '\
- f'is {self.downsamples}.'
-
- def init_weights(self, pretrained=None):
- """Initialize the weights in backbone.
-
- Args:
- pretrained (str, optional): Path to pre-trained weights.
- Defaults to None.
- """
- if isinstance(pretrained, str):
- logger = get_root_logger()
- load_checkpoint(self, pretrained, strict=False, logger=logger)
- elif pretrained is None:
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- kaiming_init(m)
- elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
- constant_init(m, 1)
- else:
- raise TypeError('pretrained must be a str or None')
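The `UNet` docstring above spells out the divisibility requirement: with the default `strides=(1, 1, 1, 1, 1)` and `downsamples=(True, True, True, True)`, the whole downsample rate is 2**4 = 16, so input height and width must be multiples of 16. A minimal sketch of building the backbone and inspecting the output pyramid, assuming the `annotator.uniformer` package and mmcv are importable:

```py
import torch

# Sketch only; assumes annotator.uniformer (and mmcv) can be imported as packaged above.
from annotator.uniformer.mmseg.models.backbones.unet import UNet

# Defaults: strides=(1, 1, 1, 1, 1), downsamples=(True, True, True, True),
# so the whole downsample rate is 16 and H, W must be multiples of 16.
model = UNet(in_channels=3, base_channels=16, num_stages=5)
model.init_weights()  # pretrained=None -> Kaiming/constant init

x = torch.randn(1, 3, 64, 64)   # 64 is divisible by 16
outs = model(x)                  # encoder bottleneck plus one map per decoder stage
for i, o in enumerate(outs):
    print(i, tuple(o.shape))
# With base_channels=16 the list runs from (1, 256, 4, 4) back up to (1, 16, 64, 64).
```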
diff --git a/spaces/ghlee94/MEDIAR/segmentation_models_pytorch/encoders/mix_transformer.py b/spaces/ghlee94/MEDIAR/segmentation_models_pytorch/encoders/mix_transformer.py
deleted file mode 100644
index 62269b39a4717c8db851d9ad8a4e45b38c79d57d..0000000000000000000000000000000000000000
--- a/spaces/ghlee94/MEDIAR/segmentation_models_pytorch/encoders/mix_transformer.py
+++ /dev/null
@@ -1,664 +0,0 @@
-# ---------------------------------------------------------------
-# Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
-#
-# This work is licensed under the NVIDIA Source Code License
-# ---------------------------------------------------------------
-import math
-import torch
-import torch.nn as nn
-from functools import partial
-
-from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-
-
-class Mlp(nn.Module):
- def __init__(
- self,
- in_features,
- hidden_features=None,
- out_features=None,
- act_layer=nn.GELU,
- drop=0.0,
- ):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.dwconv = DWConv(hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=0.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
- elif isinstance(m, nn.Conv2d):
- fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
- fan_out //= m.groups
- m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
- if m.bias is not None:
- m.bias.data.zero_()
-
- def forward(self, x, H, W):
- x = self.fc1(x)
- x = self.dwconv(x, H, W)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-class Attention(nn.Module):
- def __init__(
- self,
- dim,
- num_heads=8,
- qkv_bias=False,
- qk_scale=None,
- attn_drop=0.0,
- proj_drop=0.0,
- sr_ratio=1,
- ):
- super().__init__()
- assert (
- dim % num_heads == 0
- ), f"dim {dim} should be divided by num_heads {num_heads}."
-
- self.dim = dim
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = qk_scale or head_dim ** -0.5
-
- self.q = nn.Linear(dim, dim, bias=qkv_bias)
- self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
-
- self.sr_ratio = sr_ratio
- if sr_ratio > 1:
- self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
- self.norm = nn.LayerNorm(dim)
-
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=0.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
- elif isinstance(m, nn.Conv2d):
- fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
- fan_out //= m.groups
- m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
- if m.bias is not None:
- m.bias.data.zero_()
-
- def forward(self, x, H, W):
- B, N, C = x.shape
- q = (
- self.q(x)
- .reshape(B, N, self.num_heads, C // self.num_heads)
- .permute(0, 2, 1, 3)
- )
-
- if self.sr_ratio > 1:
- x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
- x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
- x_ = self.norm(x_)
- kv = (
- self.kv(x_)
- .reshape(B, -1, 2, self.num_heads, C // self.num_heads)
- .permute(2, 0, 3, 1, 4)
- )
- else:
- kv = (
- self.kv(x)
- .reshape(B, -1, 2, self.num_heads, C // self.num_heads)
- .permute(2, 0, 3, 1, 4)
- )
- k, v = kv[0], kv[1]
-
- attn = (q @ k.transpose(-2, -1)) * self.scale
- attn = attn.softmax(dim=-1)
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
-
- return x
-
-
-class Block(nn.Module):
- def __init__(
- self,
- dim,
- num_heads,
- mlp_ratio=4.0,
- qkv_bias=False,
- qk_scale=None,
- drop=0.0,
- attn_drop=0.0,
- drop_path=0.0,
- act_layer=nn.GELU,
- norm_layer=nn.LayerNorm,
- sr_ratio=1,
- ):
- super().__init__()
- self.norm1 = norm_layer(dim)
- self.attn = Attention(
- dim,
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- attn_drop=attn_drop,
- proj_drop=drop,
- sr_ratio=sr_ratio,
- )
- # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
- self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(
- in_features=dim,
- hidden_features=mlp_hidden_dim,
- act_layer=act_layer,
- drop=drop,
- )
-
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=0.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
- elif isinstance(m, nn.Conv2d):
- fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
- fan_out //= m.groups
- m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
- if m.bias is not None:
- m.bias.data.zero_()
-
- def forward(self, x, H, W):
- x = x + self.drop_path(self.attn(self.norm1(x), H, W))
- x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
-
- return x
-
-
-class OverlapPatchEmbed(nn.Module):
- """Image to Patch Embedding"""
-
- def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
-
- self.img_size = img_size
- self.patch_size = patch_size
- self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
- self.num_patches = self.H * self.W
- self.proj = nn.Conv2d(
- in_chans,
- embed_dim,
- kernel_size=patch_size,
- stride=stride,
- padding=(patch_size[0] // 2, patch_size[1] // 2),
- )
- self.norm = nn.LayerNorm(embed_dim)
-
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=0.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
- elif isinstance(m, nn.Conv2d):
- fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
- fan_out //= m.groups
- m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
- if m.bias is not None:
- m.bias.data.zero_()
-
- def forward(self, x):
- x = self.proj(x)
- _, _, H, W = x.shape
- x = x.flatten(2).transpose(1, 2)
- x = self.norm(x)
-
- return x, H, W
-
-
-class MixVisionTransformer(nn.Module):
- def __init__(
- self,
- img_size=224,
- patch_size=16,
- in_chans=3,
- num_classes=1000,
- embed_dims=[64, 128, 256, 512],
- num_heads=[1, 2, 4, 8],
- mlp_ratios=[4, 4, 4, 4],
- qkv_bias=False,
- qk_scale=None,
- drop_rate=0.0,
- attn_drop_rate=0.0,
- drop_path_rate=0.0,
- norm_layer=nn.LayerNorm,
- depths=[3, 4, 6, 3],
- sr_ratios=[8, 4, 2, 1],
- ):
- super().__init__()
- self.num_classes = num_classes
- self.depths = depths
-
- # patch_embed
- self.patch_embed1 = OverlapPatchEmbed(
- img_size=img_size,
- patch_size=7,
- stride=4,
- in_chans=in_chans,
- embed_dim=embed_dims[0],
- )
- self.patch_embed2 = OverlapPatchEmbed(
- img_size=img_size // 4,
- patch_size=3,
- stride=2,
- in_chans=embed_dims[0],
- embed_dim=embed_dims[1],
- )
- self.patch_embed3 = OverlapPatchEmbed(
- img_size=img_size // 8,
- patch_size=3,
- stride=2,
- in_chans=embed_dims[1],
- embed_dim=embed_dims[2],
- )
- self.patch_embed4 = OverlapPatchEmbed(
- img_size=img_size // 16,
- patch_size=3,
- stride=2,
- in_chans=embed_dims[2],
- embed_dim=embed_dims[3],
- )
-
- # transformer encoder
- dpr = [
- x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
- ] # stochastic depth decay rule
- cur = 0
- self.block1 = nn.ModuleList(
- [
- Block(
- dim=embed_dims[0],
- num_heads=num_heads[0],
- mlp_ratio=mlp_ratios[0],
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[cur + i],
- norm_layer=norm_layer,
- sr_ratio=sr_ratios[0],
- )
- for i in range(depths[0])
- ]
- )
- self.norm1 = norm_layer(embed_dims[0])
-
- cur += depths[0]
- self.block2 = nn.ModuleList(
- [
- Block(
- dim=embed_dims[1],
- num_heads=num_heads[1],
- mlp_ratio=mlp_ratios[1],
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[cur + i],
- norm_layer=norm_layer,
- sr_ratio=sr_ratios[1],
- )
- for i in range(depths[1])
- ]
- )
- self.norm2 = norm_layer(embed_dims[1])
-
- cur += depths[1]
- self.block3 = nn.ModuleList(
- [
- Block(
- dim=embed_dims[2],
- num_heads=num_heads[2],
- mlp_ratio=mlp_ratios[2],
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[cur + i],
- norm_layer=norm_layer,
- sr_ratio=sr_ratios[2],
- )
- for i in range(depths[2])
- ]
- )
- self.norm3 = norm_layer(embed_dims[2])
-
- cur += depths[2]
- self.block4 = nn.ModuleList(
- [
- Block(
- dim=embed_dims[3],
- num_heads=num_heads[3],
- mlp_ratio=mlp_ratios[3],
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[cur + i],
- norm_layer=norm_layer,
- sr_ratio=sr_ratios[3],
- )
- for i in range(depths[3])
- ]
- )
- self.norm4 = norm_layer(embed_dims[3])
-
- # classification head
- # self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity()
-
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=0.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
- elif isinstance(m, nn.Conv2d):
- fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
- fan_out //= m.groups
- m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
- if m.bias is not None:
- m.bias.data.zero_()
-
- def init_weights(self, pretrained=None):
- pass
-
- def reset_drop_path(self, drop_path_rate):
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
- cur = 0
- for i in range(self.depths[0]):
- self.block1[i].drop_path.drop_prob = dpr[cur + i]
-
- cur += self.depths[0]
- for i in range(self.depths[1]):
- self.block2[i].drop_path.drop_prob = dpr[cur + i]
-
- cur += self.depths[1]
- for i in range(self.depths[2]):
- self.block3[i].drop_path.drop_prob = dpr[cur + i]
-
- cur += self.depths[2]
- for i in range(self.depths[3]):
- self.block4[i].drop_path.drop_prob = dpr[cur + i]
-
- def freeze_patch_emb(self):
- self.patch_embed1.requires_grad = False
-
- @torch.jit.ignore
- def no_weight_decay(self):
- return {
- "pos_embed1",
- "pos_embed2",
- "pos_embed3",
- "pos_embed4",
- "cls_token",
- } # has pos_embed may be better
-
- def get_classifier(self):
- return self.head
-
- def reset_classifier(self, num_classes, global_pool=""):
- self.num_classes = num_classes
- self.head = (
- nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
- )
-
- def forward_features(self, x):
- B = x.shape[0]
- outs = []
-
- # stage 1
- x, H, W = self.patch_embed1(x)
- for i, blk in enumerate(self.block1):
- x = blk(x, H, W)
- x = self.norm1(x)
- x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
- outs.append(x)
-
- # stage 2
- x, H, W = self.patch_embed2(x)
- for i, blk in enumerate(self.block2):
- x = blk(x, H, W)
- x = self.norm2(x)
- x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
- outs.append(x)
-
- # stage 3
- x, H, W = self.patch_embed3(x)
- for i, blk in enumerate(self.block3):
- x = blk(x, H, W)
- x = self.norm3(x)
- x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
- outs.append(x)
-
- # stage 4
- x, H, W = self.patch_embed4(x)
- for i, blk in enumerate(self.block4):
- x = blk(x, H, W)
- x = self.norm4(x)
- x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
- outs.append(x)
-
- return outs
-
- def forward(self, x):
- x = self.forward_features(x)
- # x = self.head(x)
-
- return x
-
-
-class DWConv(nn.Module):
- def __init__(self, dim=768):
- super(DWConv, self).__init__()
- self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
-
- def forward(self, x, H, W):
- B, N, C = x.shape
- x = x.transpose(1, 2).view(B, C, H, W)
- x = self.dwconv(x)
- x = x.flatten(2).transpose(1, 2)
-
- return x
-
-
-# ---------------------------------------------------------------
-# End of NVIDIA code
-# ---------------------------------------------------------------
-
-from ._base import EncoderMixin # noqa E402
-
-
-class MixVisionTransformerEncoder(MixVisionTransformer, EncoderMixin):
- def __init__(self, out_channels, depth=5, **kwargs):
- super().__init__(**kwargs)
- self._out_channels = out_channels
- self._depth = depth
- self._in_channels = 3
-
- def make_dilated(self, *args, **kwargs):
- raise ValueError("MixVisionTransformer encoder does not support dilated mode")
-
- def set_in_channels(self, in_channels, *args, **kwargs):
- if in_channels != 3:
- raise ValueError(
- "MixVisionTransformer encoder does not support in_channels setting other than 3"
- )
-
- def forward(self, x):
-
- # create dummy output for the first block
- B, C, H, W = x.shape
- dummy = torch.empty([B, 0, H // 2, W // 2], dtype=x.dtype, device=x.device)
-
- return [x, dummy] + self.forward_features(x)[: self._depth - 1]
-
- def load_state_dict(self, state_dict):
- state_dict.pop("head.weight", None)
- state_dict.pop("head.bias", None)
- return super().load_state_dict(state_dict)
-
-
-def get_pretrained_cfg(name):
- return {
- "url": "https://github.com/qubvel/segmentation_models.pytorch/releases/download/v0.0.2/{}.pth".format(
- name
- ),
- "input_space": "RGB",
- "input_size": [3, 224, 224],
- "input_range": [0, 1],
- "mean": [0.485, 0.456, 0.406],
- "std": [0.229, 0.224, 0.225],
- }
-
-
-mix_transformer_encoders = {
- "mit_b0": {
- "encoder": MixVisionTransformerEncoder,
- "pretrained_settings": {"imagenet": get_pretrained_cfg("mit_b0"),},
- "params": dict(
- out_channels=(3, 0, 32, 64, 160, 256),
- patch_size=4,
- embed_dims=[32, 64, 160, 256],
- num_heads=[1, 2, 5, 8],
- mlp_ratios=[4, 4, 4, 4],
- qkv_bias=True,
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
- depths=[2, 2, 2, 2],
- sr_ratios=[8, 4, 2, 1],
- drop_rate=0.0,
- drop_path_rate=0.1,
- ),
- },
- "mit_b1": {
- "encoder": MixVisionTransformerEncoder,
- "pretrained_settings": {"imagenet": get_pretrained_cfg("mit_b1"),},
- "params": dict(
- out_channels=(3, 0, 64, 128, 320, 512),
- patch_size=4,
- embed_dims=[64, 128, 320, 512],
- num_heads=[1, 2, 5, 8],
- mlp_ratios=[4, 4, 4, 4],
- qkv_bias=True,
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
- depths=[2, 2, 2, 2],
- sr_ratios=[8, 4, 2, 1],
- drop_rate=0.0,
- drop_path_rate=0.1,
- ),
- },
- "mit_b2": {
- "encoder": MixVisionTransformerEncoder,
- "pretrained_settings": {"imagenet": get_pretrained_cfg("mit_b2"),},
- "params": dict(
- out_channels=(3, 0, 64, 128, 320, 512),
- patch_size=4,
- embed_dims=[64, 128, 320, 512],
- num_heads=[1, 2, 5, 8],
- mlp_ratios=[4, 4, 4, 4],
- qkv_bias=True,
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
- depths=[3, 4, 6, 3],
- sr_ratios=[8, 4, 2, 1],
- drop_rate=0.0,
- drop_path_rate=0.1,
- ),
- },
- "mit_b3": {
- "encoder": MixVisionTransformerEncoder,
- "pretrained_settings": {"imagenet": get_pretrained_cfg("mit_b3"),},
- "params": dict(
- out_channels=(3, 0, 64, 128, 320, 512),
- patch_size=4,
- embed_dims=[64, 128, 320, 512],
- num_heads=[1, 2, 5, 8],
- mlp_ratios=[4, 4, 4, 4],
- qkv_bias=True,
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
- depths=[3, 4, 18, 3],
- sr_ratios=[8, 4, 2, 1],
- drop_rate=0.0,
- drop_path_rate=0.1,
- ),
- },
- "mit_b4": {
- "encoder": MixVisionTransformerEncoder,
- "pretrained_settings": {"imagenet": get_pretrained_cfg("mit_b4"),},
- "params": dict(
- out_channels=(3, 0, 64, 128, 320, 512),
- patch_size=4,
- embed_dims=[64, 128, 320, 512],
- num_heads=[1, 2, 5, 8],
- mlp_ratios=[4, 4, 4, 4],
- qkv_bias=True,
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
- depths=[3, 8, 27, 3],
- sr_ratios=[8, 4, 2, 1],
- drop_rate=0.0,
- drop_path_rate=0.1,
- ),
- },
- "mit_b5": {
- "encoder": MixVisionTransformerEncoder,
- "pretrained_settings": {"imagenet": get_pretrained_cfg("mit_b5"),},
- "params": dict(
- out_channels=(3, 0, 64, 128, 320, 512),
- patch_size=4,
- embed_dims=[64, 128, 320, 512],
- num_heads=[1, 2, 5, 8],
- mlp_ratios=[4, 4, 4, 4],
- qkv_bias=True,
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
- depths=[3, 6, 40, 3],
- sr_ratios=[8, 4, 2, 1],
- drop_rate=0.0,
- drop_path_rate=0.1,
- ),
- },
-}
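The `mix_transformer_encoders` registry above maps the `mit_b0` to `mit_b5` names to `MixVisionTransformerEncoder` configurations. A minimal sketch of instantiating one entry directly from its `params`, assuming this module is importable as `segmentation_models_pytorch.encoders.mix_transformer` and that `timm` and `torch` are installed:

```py
import torch

# Sketch only; assumes the segmentation_models_pytorch package laid out above is importable.
from segmentation_models_pytorch.encoders.mix_transformer import (
    MixVisionTransformerEncoder,
    mix_transformer_encoders,
)

params = mix_transformer_encoders["mit_b0"]["params"]
encoder = MixVisionTransformerEncoder(**params)   # out_channels=(3, 0, 32, 64, 160, 256)

x = torch.randn(1, 3, 224, 224)
features = encoder(x)   # [input, empty dummy] + 4 transformer stages (default depth=5)
for f in features:
    print(tuple(f.shape))
# (1, 3, 224, 224), (1, 0, 112, 112), (1, 32, 56, 56), (1, 64, 28, 28), (1, 160, 14, 14), (1, 256, 7, 7)
```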
diff --git a/spaces/gordonchan/h2oo/utils_langchain.py b/spaces/gordonchan/h2oo/utils_langchain.py
deleted file mode 100644
index d50110fa0dc664a95dc99b3fa47053287507b689..0000000000000000000000000000000000000000
--- a/spaces/gordonchan/h2oo/utils_langchain.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from typing import Any, Dict, List, Union, Optional
-import time
-import queue
-
-from langchain.callbacks.base import BaseCallbackHandler
-from langchain.schema import LLMResult
-
-
-class StreamingGradioCallbackHandler(BaseCallbackHandler):
- """
- Similar to H2OTextIteratorStreamer that is for HF backend, but here LangChain backend
- """
- def __init__(self, timeout: Optional[float] = None, block=True):
- super().__init__()
- self.text_queue = queue.SimpleQueue()
- self.stop_signal = None
- self.do_stop = False
- self.timeout = timeout
- self.block = block
-
- def on_llm_start(
- self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
- ) -> None:
- """Run when LLM starts running. Clean the queue."""
- while not self.text_queue.empty():
- try:
- self.text_queue.get(block=False)
- except queue.Empty:
- continue
-
- def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
- """Run on new LLM token. Only available when streaming is enabled."""
- self.text_queue.put(token)
-
- def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
- """Run when LLM ends running."""
- self.text_queue.put(self.stop_signal)
-
- def on_llm_error(
- self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
- ) -> None:
- """Run when LLM errors."""
- self.text_queue.put(self.stop_signal)
-
- def __iter__(self):
- return self
-
- def __next__(self):
- while True:
- try:
- value = self.stop_signal # value looks unused in pycharm, not true
- if self.do_stop:
- print("hit stop", flush=True)
- # could raise or break, maybe best to raise and make parent see if any exception in thread
- raise StopIteration()
- # break
- value = self.text_queue.get(block=self.block, timeout=self.timeout)
- break
- except queue.Empty:
- time.sleep(0.01)
- if value == self.stop_signal:
- raise StopIteration()
- else:
- return value
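`StreamingGradioCallbackHandler` is meant to be handed to a LangChain LLM as a callback and then iterated from the consumer side. A minimal usage sketch, assuming a streaming-capable LangChain LLM such as `langchain.llms.OpenAI`; the model class, prompt, and parameters here are illustrative and not taken from this repo:

```py
from threading import Thread

from langchain.llms import OpenAI  # assumed backend; any streaming-capable LLM should work

handler = StreamingGradioCallbackHandler(timeout=None, block=True)
llm = OpenAI(streaming=True, callbacks=[handler], temperature=0)

# Run generation in the background so the main thread can consume tokens as they arrive.
thread = Thread(target=llm.invoke, args=("Write a haiku about streams.",))
thread.start()

for token in handler:   # iteration ends when on_llm_end puts the stop signal (None)
    print(token, end="", flush=True)

thread.join()
```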
diff --git a/spaces/gossminn/fillmorle-app/sftp/metrics/srl_metrics.py b/spaces/gossminn/fillmorle-app/sftp/metrics/srl_metrics.py
deleted file mode 100644
index e769237931e3ba692537948e44461990c1271b83..0000000000000000000000000000000000000000
--- a/spaces/gossminn/fillmorle-app/sftp/metrics/srl_metrics.py
+++ /dev/null
@@ -1,138 +0,0 @@
-from typing import *
-
-from allennlp.training.metrics import Metric
-from overrides import overrides
-import numpy as np
-import logging
-
-from .base_f import BaseF
-from ..utils import Span, max_match
-
-logger = logging.getLogger('srl_metric')
-
-
-@Metric.register('srl')
-class SRLMetric(Metric):
- def __init__(self, check_type: Optional[bool] = None):
- self.tri_i = BaseF('tri-i')
- self.tri_c = BaseF('tri-c')
- self.arg_i = BaseF('arg-i')
- self.arg_c = BaseF('arg-c')
- if check_type is not None:
- logger.warning('Check type argument is deprecated.')
-
- def reset(self) -> None:
- for metric in [self.tri_i, self.tri_c, self.arg_i, self.arg_c]:
- metric.reset()
-
- def get_metric(self, reset: bool) -> Dict[str, Any]:
- ret = dict()
- for metric in [self.tri_i, self.tri_c, self.arg_i, self.arg_c]:
- ret.update(metric.get_metric(reset))
- return ret
-
- @overrides
- def __call__(self, prediction: Span, gold: Span):
- self.with_label_event(prediction, gold)
- self.without_label_event(prediction, gold)
- self.tuple_eval(prediction, gold)
- # self.with_label_arg(prediction, gold)
- # self.without_label_arg(prediction, gold)
-
- def tuple_eval(self, prediction: Span, gold: Span):
- def extract_tuples(vr: Span, parent_boundary: bool):
- labeled, unlabeled = list(), list()
- for event in vr:
- for arg in event:
- if parent_boundary:
- labeled.append((event.boundary, event.label, arg.boundary, arg.label))
- unlabeled.append((event.boundary, event.label, arg.boundary))
- else:
- labeled.append((event.label, arg.boundary, arg.label))
- unlabeled.append((event.label, arg.boundary))
- return labeled, unlabeled
-
- def equal_matrix(l1, l2): return np.array([[e1 == e2 for e2 in l2] for e1 in l1], dtype=np.int)
-
- pred_label, pred_unlabel = extract_tuples(prediction, False)
- gold_label, gold_unlabel = extract_tuples(gold, False)
-
- if len(pred_label) == 0 or len(gold_label) == 0:
- arg_c_tp = arg_i_tp = 0
- else:
- label_bipartite = equal_matrix(pred_label, gold_label)
- unlabel_bipartite = equal_matrix(pred_unlabel, gold_unlabel)
- arg_c_tp, arg_i_tp = max_match(label_bipartite), max_match(unlabel_bipartite)
-
- arg_c_fp = prediction.n_nodes - len(prediction) - 1 - arg_c_tp
- arg_c_fn = gold.n_nodes - len(gold) - 1 - arg_c_tp
- arg_i_fp = prediction.n_nodes - len(prediction) - 1 - arg_i_tp
- arg_i_fn = gold.n_nodes - len(gold) - 1 - arg_i_tp
-
- assert arg_i_tp >= 0 and arg_i_fn >= 0 and arg_i_fp >= 0
- self.arg_i.tp += arg_i_tp
- self.arg_i.fp += arg_i_fp
- self.arg_i.fn += arg_i_fn
-
- assert arg_c_tp >= 0 and arg_c_fn >= 0 and arg_c_fp >= 0
- self.arg_c.tp += arg_c_tp
- self.arg_c.fp += arg_c_fp
- self.arg_c.fn += arg_c_fn
-
- def with_label_event(self, prediction: Span, gold: Span):
- trigger_tp = prediction.match(gold, True, 2) - 1
- trigger_fp = len(prediction) - trigger_tp
- trigger_fn = len(gold) - trigger_tp
- assert trigger_fp >= 0 and trigger_fn >= 0 and trigger_tp >= 0
- self.tri_c.tp += trigger_tp
- self.tri_c.fp += trigger_fp
- self.tri_c.fn += trigger_fn
-
- def with_label_arg(self, prediction: Span, gold: Span):
- trigger_tp = prediction.match(gold, True, 2) - 1
- role_tp = prediction.match(gold, True, ignore_parent_boundary=True) - 1 - trigger_tp
- role_fp = (prediction.n_nodes - 1 - len(prediction)) - role_tp
- role_fn = (gold.n_nodes - 1 - len(gold)) - role_tp
- assert role_fp >= 0 and role_fn >= 0 and role_tp >= 0
- self.arg_c.tp += role_tp
- self.arg_c.fp += role_fp
- self.arg_c.fn += role_fn
-
- def without_label_event(self, prediction: Span, gold: Span):
- tri_i_tp = prediction.match(gold, False, 2) - 1
- tri_i_fp = len(prediction) - tri_i_tp
- tri_i_fn = len(gold) - tri_i_tp
- assert tri_i_tp >= 0 and tri_i_fp >= 0 and tri_i_fn >= 0
- self.tri_i.tp += tri_i_tp
- self.tri_i.fp += tri_i_fp
- self.tri_i.fn += tri_i_fn
-
- def without_label_arg(self, prediction: Span, gold: Span):
- arg_i_tp = 0
- matched_pairs: List[Tuple[Span, Span]] = list()
- n_gold_arg, n_pred_arg = gold.n_nodes - len(gold) - 1, prediction.n_nodes - len(prediction) - 1
- prediction, gold = prediction.clone(), gold.clone()
- for p in prediction:
- for g in gold:
- if p.match(g, True, 1) == 1:
- arg_i_tp += (p.match(g, False) - 1)
- matched_pairs.append((p, g))
- break
- for p, g in matched_pairs:
- prediction.remove_child(p)
- gold.remove_child(g)
-
- sub_matches = np.zeros([len(prediction), len(gold)], np.int)
- for p_idx, p in enumerate(prediction):
- for g_idx, g in enumerate(gold):
- if p.label == g.label:
- sub_matches[p_idx, g_idx] = p.match(g, False, -1, True)
- arg_i_tp += max_match(sub_matches)
-
- arg_i_fp = n_pred_arg - arg_i_tp
- arg_i_fn = n_gold_arg - arg_i_tp
- assert arg_i_tp >= 0 and arg_i_fn >= 0 and arg_i_fp >= 0
-
- self.arg_i.tp += arg_i_tp
- self.arg_i.fp += arg_i_fp
- self.arg_i.fn += arg_i_fn
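`tuple_eval` above builds a 0/1 equality matrix between predicted and gold tuples and feeds it to `max_match`, which appears to compute the size of a maximum bipartite matching; `max_match` itself lives in `..utils` and is not shown here. A small sketch of that idea using SciPy's assignment solver, as an illustration of the concept rather than the repo's implementation:

```py
import numpy as np
from scipy.optimize import linear_sum_assignment


def max_match_sketch(bipartite: np.ndarray) -> int:
    """Size of a maximum matching for a 0/1 compatibility matrix (illustrative only)."""
    if bipartite.size == 0:
        return 0
    rows, cols = linear_sum_assignment(bipartite, maximize=True)
    return int(bipartite[rows, cols].sum())


# Two predicted tuples vs. three gold tuples; an entry of 1 means "these tuples are equal".
compat = np.array([
    [1, 0, 0],
    [0, 0, 1],
])
print(max_match_sketch(compat))  # 2 -> both predictions find a distinct gold match
```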
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Codice Attivazione Per Kaspersky Gratisl.md b/spaces/gotiQspiryo/whisper-ui/examples/Codice Attivazione Per Kaspersky Gratisl.md
deleted file mode 100644
index 261fa8b4db5b657bc570c22da3c7823273ff88dc..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Codice Attivazione Per Kaspersky Gratisl.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
Errore "È stato superato il numero massimo di attivazioni consentite per questo codice di attivazione" Errore "La licenza è scaduta" Come attivare un'applicazione Kaspersky in più dispositivi
-
Il codice di attivazione potrebbe essere necessario per attivare l'applicazione dopo l'installazione, per spostarla in un altro dispositivo o in caso di problemi di licenza. Per istruzioni su come trovare il codice di attivazione, consulta questo articolo.
Informazioni sui codici di attivazione per le applicazioni Kaspersky Come salvare la licenza Kaspersky prima di reinstallare il sistema operativo Come ripristinare un'applicazione Kaspersky dopo la reinstallazione del sistema operativo Come trasferire la licenza di un'applicazione Kaspersky da un dispositivo all'altro Come reimpostare una password per un'applicazione Kaspersky Errore "È stato superato il numero massimo di attivazioni consentite per questo codice di attivazione" How to connect your device to My Kaspersky (in inglese)
-
Licenza Valida per aggiornamento oppure come nuova attivazione. Tutte le licenze sono anche in lingua italiana, facili da installare ed attivare. Invio codice di licenza tramite email entro 5 Minuti.
-
Se invece il cliente dovesse riscontrare qualsiasi problematica sulla licenza ci dovrà fornire tutte le informazioni necessarie per permetterci di testare la licenza, inviando una mail ad info@licenzesoftware.it, dove deve essere riportato il codice di attivazione da noi inviato, allegando anche possibilmente alcuni Screenshot dell'errore per poter risolvere in maniera tempestiva il problema.
-
Chiave Kaspersky gratuita. Chiave gratuita Kaspersky Internet Security 2019. Chiave Kaspersky ufficiale gratuita, codice di attivazione Kaspersky gratuito per 91, 184 e 365 giorni. kaspersky gratis antivirus 365 giorni. Codice di attivazione ufficiale gratuito di Kaspersky Keys. Attivazione gratuita del rinnovo della licenza di Kaspersky Keys for Kaspersky.
-
Gratuito serie fresche chiave codici di attivazione kaspersky anti-virus codice di attivazione gratuito kaspersky, licenza gratuita Kaspersky Internet Security 91, 184, 365 giorni Proteggi il tuo computer Kaspersky keys licenza gratuita per tutti i dispositivi Rinnovo della licenza gratuito Kaspersky Internet Security
-
Per il tuo computer Kaspersky Lab fornisce una protezione completamente gratuita. Scarica gratuitamente il Kaspersky ufficiale. Ottieni una licenza di attivazione gratuita per Kaspersky per 365 giorni. Gratuito versione completa e un codice di attivazione gratuito per Kaspersky. Kaspersky 2020 ufficiale gratuito. Scarica gratuitamente Kaspersky dal sito ufficiale, una licenza di attivazione per 365 giorni, per proteggere il tuo computer, i tuoi dati personali, i tuoi soldi e la tua famiglia. Ottieni Kaspersky gratis versione gratuita 365 giorni per sistema operativo Windows 7, 8.1 e successivi 10
-
-
1. Selezionare l'elenco dei proxy del Paese in cui attivare il programma antivirus in base alla regione del codice di attivazione. Link agli elenchi di proxy dei paesi - -list/ 2. Scegliere un server proxy (preferibilmente ANM o HIA). 3. Andare su KIS o KTS: Impostazioni => Avanzate => Rete => Impostazioni server proxy. 4. Selezionare "Utilizza le impostazioni proxy specificate". 5. Compilare i campi Indirizzo IP e numero di porta. Fare clic su Salva. 6. Immettere il codice di attivazione. 7. Attiviamo. 8. Dopo l'attivazione, riportiamo le impostazioni al loro stato originale.
-
Risposta
On-line con carta di credito.
On-line con fax.
Bonificio Bancario: Il messaggio di conferma sarà inviato a ricezione del bonifico bancario sul conto corrente Nexway Italia (il tempo stimato per la ricezione e l'invio della conferma d'ordine è di circa due, tre giorni lavorativi).
Online tramite PayPal.
Di conseguenza, come attivare rinnovo kaspersky? Se hai acquistato l'applicazione Kaspersky tramite il tuo provider di servizi Internet, visualizzerai i pulsanti Verifica stato e Gestisci abbonamento. Contatta il servizio di supporto del tuo provider Internet per rinnovare la licenza. Immetti il codice di attivazione e fai clic su Salva codice di attivazione.
-
La versione ESD come scritto sopra, è un codice che viene inviato per email, tutte le altre versioni hanno una spedizione fisica: Il COA è un adesivo che viene di solito applicato sui pc con scritto sopra la product key, il DVD BOX (o System Builder) è una scatola con all'interno il DVD e la product key di attivazione e infine la KeyCard è una Scheda con sopra scritta la product key di attivazione per quel prodotto.
-
Trying to activate the software in one country with an activation code intended for another region seems like a long shot. Getting the same error message over and over is frustrating, but you may be able to get past it.
-
First of all, the best way to solve this activation-code problem is to use a new license that can be activated in your country. This solution comes with its share of disappointment, but it is consistent with Kaspersky Lab's copyright policy.
-
Once the VPN is correctly installed, reactivate Kaspersky. The solution described here should fix any problems with the activation code; it will not, however, spare you the usual VPN issues you may run into.
-
Kaspersky Internet Security 2022 offers all this and much more. With this Kaspersky Internet Security product you get 1 device, 1 year and 1 activation code. It also comes with advanced parental controls that let you manage what your children do online with a set of strict rules.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/gradio/HuBERT/examples/backtranslation/sacrebleu.sh b/spaces/gradio/HuBERT/examples/backtranslation/sacrebleu.sh
deleted file mode 100644
index a70da23f48e2699297799611412783d4560dc45a..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/examples/backtranslation/sacrebleu.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-if [ $# -ne 5 ]; then
- echo "usage: $0 [dataset=wmt14/full] [langpair=en-de] [databin] [bpecode] [model]"
- exit
-fi
-
-
-DATASET=$1
-LANGPAIR=$2
-DATABIN=$3
-BPECODE=$4
-MODEL=$5
-
-SRCLANG=$(echo $LANGPAIR | cut -d '-' -f 1)
-TGTLANG=$(echo $LANGPAIR | cut -d '-' -f 2)
-
-
-BPEROOT=examples/backtranslation/subword-nmt/subword_nmt
-if [ ! -e $BPEROOT ]; then
- BPEROOT=subword-nmt/subword_nmt
- if [ ! -e $BPEROOT ]; then
- echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
- git clone https://github.com/rsennrich/subword-nmt.git
- fi
-fi
-
-
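-# Fetch the source side of the chosen test set, tokenize it with sacremoses,
-# apply the BPE codes, translate with fairseq-interactive, then strip BPE,
-# detokenize and score the hypotheses against the reference with sacrebleu.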
-sacrebleu -t $DATASET -l $LANGPAIR --echo src \
-| sacremoses tokenize -a -l $SRCLANG -q \
-| python $BPEROOT/apply_bpe.py -c $BPECODE \
-| fairseq-interactive $DATABIN --path $MODEL \
- -s $SRCLANG -t $TGTLANG \
- --beam 5 --remove-bpe --buffer-size 1024 --max-tokens 8000 \
-| grep ^H- | cut -f 3- \
-| sacremoses detokenize -l $TGTLANG -q \
-| sacrebleu -t $DATASET -l $LANGPAIR
diff --git a/spaces/gradio/HuBERT/examples/byte_level_bpe/gru_transformer.py b/spaces/gradio/HuBERT/examples/byte_level_bpe/gru_transformer.py
deleted file mode 100644
index d4efa93a4d75da71c78e786d7f62101ef3266af4..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/examples/byte_level_bpe/gru_transformer.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq.models import register_model, register_model_architecture
-from fairseq.models.transformer import TransformerEncoder, TransformerModel
-
-
-@register_model("gru_transformer")
-class GRUTransformerModel(TransformerModel):
- @classmethod
- def build_encoder(cls, args, src_dict, embed_tokens):
- return GRUTransformerEncoder(args, src_dict, embed_tokens)
-
-
-class GRUTransformerEncoder(TransformerEncoder):
- def __init__(self, args, dictionary, embed_tokens):
- super().__init__(args, dictionary, embed_tokens)
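- # Bidirectional single-layer GRU that contextualizes token embeddings before
- # the transformer layers; hidden_size is half the embedding dim so the
- # concatenated forward/backward outputs match embedding_dim.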
- self.emb_ctx = nn.GRU(
- input_size=embed_tokens.embedding_dim,
- hidden_size=embed_tokens.embedding_dim // 2,
- num_layers=1,
- bidirectional=True,
- )
-
- def forward_embedding(self, src_tokens):
- # embed tokens and positions
- x = embed = self.embed_scale * self.embed_tokens(src_tokens)
- if self.embed_positions is not None:
- x = embed + self.embed_positions(src_tokens)
-
- # contextualize embeddings
- x = x.transpose(0, 1)
- x = self.dropout_module(x)
- x, _ = self.emb_ctx.forward(x)
- x = x.transpose(0, 1)
-
- if self.layernorm_embedding is not None:
- x = self.layernorm_embedding(x)
- x = self.dropout_module(x)
- return x, embed
-
-
-@register_model_architecture("gru_transformer", "gru_transformer")
-def gru_transformer_base_architecture(args):
- args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
- args.encoder_layers = getattr(args, "encoder_layers", 6)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
- args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
- args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
- args.decoder_ffn_embed_dim = getattr(
- args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
- )
- args.decoder_layers = getattr(args, "decoder_layers", 6)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
- args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
- args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
- args.attention_dropout = getattr(args, "attention_dropout", 0.0)
- args.activation_dropout = getattr(args, "activation_dropout", 0.0)
- args.activation_fn = getattr(args, "activation_fn", "relu")
- args.dropout = getattr(args, "dropout", 0.1)
- args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
- args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
- args.share_decoder_input_output_embed = getattr(
- args, "share_decoder_input_output_embed", False
- )
- args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
- args.no_token_positional_embeddings = getattr(
- args, "no_token_positional_embeddings", False
- )
- args.adaptive_input = getattr(args, "adaptive_input", False)
- args.no_cross_attention = getattr(args, "no_cross_attention", False)
- args.cross_self_attention = getattr(args, "cross_self_attention", False)
- args.layer_wise_attention = getattr(args, "layer_wise_attention", False)
-
- args.decoder_output_dim = getattr(
- args, "decoder_output_dim", args.decoder_embed_dim
- )
- args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
-
- args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
- args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
-
-
-@register_model_architecture("gru_transformer", "gru_transformer_big")
-def gru_transformer_big(args):
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
- args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
- args.dropout = getattr(args, "dropout", 0.3)
- gru_transformer_base_architecture(args)
diff --git a/spaces/gradio/HuBERT/fairseq/models/roberta/hub_interface.py b/spaces/gradio/HuBERT/fairseq/models/roberta/hub_interface.py
deleted file mode 100644
index c9af434bde61f399a4eebaafd5811be9a37d538e..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/fairseq/models/roberta/hub_interface.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.data import encoders
-
-
-class RobertaHubInterface(nn.Module):
- """A simple PyTorch Hub interface to RoBERTa.
-
- Usage: https://github.com/pytorch/fairseq/tree/master/examples/roberta
- """
-
- def __init__(self, cfg, task, model):
- super().__init__()
- self.cfg = cfg
- self.task = task
- self.model = model
-
- self.bpe = encoders.build_bpe(cfg.bpe)
-
- # this is useful for determining the device
- self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))
-
- @property
- def device(self):
- return self._float_tensor.device
-
- def encode(
- self, sentence: str, *addl_sentences, no_separator=False
- ) -> torch.LongTensor:
- """
- BPE-encode a sentence (or multiple sentences).
-
- Every sequence begins with a beginning-of-sentence (`<s>`) symbol.
- Every sentence ends with an end-of-sentence (`</s>`) and we use an
- extra end-of-sentence (`</s>`) as a separator.
-
- Example (single sentence): `<s> a b c </s>`
- Example (sentence pair): `<s> d e f </s> </s> 1 2 3 </s>`
-
- The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE
- requires leading spaces. For example::
-
- >>> roberta.encode('Hello world').tolist()
- [0, 31414, 232, 2]
- >>> roberta.encode(' world').tolist()
- [0, 232, 2]
- >>> roberta.encode('world').tolist()
- [0, 8331, 2]
- """
- bpe_sentence = "<s> " + self.bpe.encode(sentence) + " </s>"
- for s in addl_sentences:
- bpe_sentence += " </s>" if not no_separator else ""
- bpe_sentence += " " + self.bpe.encode(s) + " </s>"
- tokens = self.task.source_dictionary.encode_line(
- bpe_sentence, append_eos=False, add_if_not_exist=False
- )
- return tokens.long()
-
- def decode(self, tokens: torch.LongTensor):
- assert tokens.dim() == 1
- tokens = tokens.numpy()
- if tokens[0] == self.task.source_dictionary.bos():
- tokens = tokens[1:] # remove <s>
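- # sentences are separated by two consecutive </s> tokens, so split there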
- eos_mask = tokens == self.task.source_dictionary.eos()
- doc_mask = eos_mask[1:] & eos_mask[:-1]
- sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
- sentences = [
- self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences
- ]
- if len(sentences) == 1:
- return sentences[0]
- return sentences
-
- def extract_features(
- self, tokens: torch.LongTensor, return_all_hiddens: bool = False
- ) -> torch.Tensor:
- if tokens.dim() == 1:
- tokens = tokens.unsqueeze(0)
- if tokens.size(-1) > self.model.max_positions():
- raise ValueError(
- "tokens exceeds maximum length: {} > {}".format(
- tokens.size(-1), self.model.max_positions()
- )
- )
- features, extra = self.model(
- tokens.to(device=self.device),
- features_only=True,
- return_all_hiddens=return_all_hiddens,
- )
- if return_all_hiddens:
- # convert from T x B x C -> B x T x C
- inner_states = extra["inner_states"]
- return [inner_state.transpose(0, 1) for inner_state in inner_states]
- else:
- return features # just the last layer's features
-
- def register_classification_head(
- self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
- ):
- self.model.register_classification_head(
- name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
- )
-
- def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
- features = self.extract_features(tokens.to(device=self.device))
- logits = self.model.classification_heads[head](features)
- if return_logits:
- return logits
- return F.log_softmax(logits, dim=-1)
-
- def extract_features_aligned_to_words(
- self, sentence: str, return_all_hiddens: bool = False
- ) -> torch.Tensor:
- """Extract RoBERTa features, aligned to spaCy's word-level tokenizer."""
- from fairseq.models.roberta import alignment_utils
- from spacy.tokens import Doc
-
- nlp = alignment_utils.spacy_nlp()
- tokenizer = alignment_utils.spacy_tokenizer()
-
- # tokenize both with GPT-2 BPE and spaCy
- bpe_toks = self.encode(sentence)
- spacy_toks = tokenizer(sentence)
- spacy_toks_ws = [t.text_with_ws for t in tokenizer(sentence)]
- alignment = alignment_utils.align_bpe_to_words(self, bpe_toks, spacy_toks_ws)
-
- # extract features and align them
- features = self.extract_features(
- bpe_toks, return_all_hiddens=return_all_hiddens
- )
- features = features.squeeze(0)
- aligned_feats = alignment_utils.align_features_to_words(
- self, features, alignment
- )
-
- # wrap in spaCy Doc
- doc = Doc(
- nlp.vocab,
- words=[""] + [x.text for x in spacy_toks] + [""],
- spaces=[True]
- + [x.endswith(" ") for x in spacy_toks_ws[:-1]]
- + [True, False],
- )
- assert len(doc) == aligned_feats.size(0)
- doc.user_token_hooks["vector"] = lambda token: aligned_feats[token.i]
- return doc
-
- def fill_mask(self, masked_input: str, topk: int = 5):
- masked_token = "<mask>"
- assert (
- masked_token in masked_input and masked_input.count(masked_token) == 1
- ), "Please add one {0} token for the input, eg: 'He is a {0} guy'".format(
- masked_token
- )
-
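- # BPE-encode the text on either side of the mask separately, then re-join the
- # pieces around the literal <mask> token so it stays a single dictionary symbol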
- text_spans = masked_input.split(masked_token)
- text_spans_bpe = (
- (" {0} ".format(masked_token))
- .join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans])
- .strip()
- )
- tokens = self.task.source_dictionary.encode_line(
- " " + text_spans_bpe + " ",
- append_eos=False,
- add_if_not_exist=False,
- )
-
- masked_index = (tokens == self.task.mask_idx).nonzero(as_tuple=False)
- if tokens.dim() == 1:
- tokens = tokens.unsqueeze(0)
-
- with utils.model_eval(self.model):
- features, extra = self.model(
- tokens.long().to(device=self.device),
- features_only=False,
- return_all_hiddens=False,
- )
- logits = features[0, masked_index, :].squeeze()
- prob = logits.softmax(dim=0)
- values, index = prob.topk(k=topk, dim=0)
- topk_predicted_token_bpe = self.task.source_dictionary.string(index)
-
- topk_filled_outputs = []
- for index, predicted_token_bpe in enumerate(
- topk_predicted_token_bpe.split(" ")
- ):
- predicted_token = self.bpe.decode(predicted_token_bpe)
- # Quick hack to fix https://github.com/pytorch/fairseq/issues/1306
- if predicted_token_bpe.startswith("\u2581"):
- predicted_token = " " + predicted_token
- if " {0}".format(masked_token) in masked_input:
- topk_filled_outputs.append(
- (
- masked_input.replace(
- " {0}".format(masked_token), predicted_token
- ),
- values[index].item(),
- predicted_token,
- )
- )
- else:
- topk_filled_outputs.append(
- (
- masked_input.replace(masked_token, predicted_token),
- values[index].item(),
- predicted_token,
- )
- )
- return topk_filled_outputs
-
- def disambiguate_pronoun(self, sentence: str) -> bool:
- """
- Usage::
-
- >>> disambiguate_pronoun('The _trophy_ would not fit in the brown suitcase because [it] was too big.')
- True
-
- >>> disambiguate_pronoun('The trophy would not fit in the brown suitcase because [it] was too big.')
- 'The trophy'
- """
- assert hasattr(
- self.task, "disambiguate_pronoun"
- ), "roberta.disambiguate_pronoun() requires a model trained with the WSC task."
- with utils.model_eval(self.model):
- return self.task.disambiguate_pronoun(
- self.model, sentence, use_cuda=self.device.type == "cuda"
- )
diff --git a/spaces/guardiancc/video-face-swap/roop/ui.py b/spaces/guardiancc/video-face-swap/roop/ui.py
deleted file mode 100644
index ba693dac116bd416b91518734fa550e9dfb95c7b..0000000000000000000000000000000000000000
--- a/spaces/guardiancc/video-face-swap/roop/ui.py
+++ /dev/null
@@ -1,231 +0,0 @@
-import os
-import webbrowser
-import customtkinter as ctk
-from typing import Callable, Tuple
-import cv2
-from PIL import Image, ImageOps
-
-import roop.globals
-import roop.metadata
-from roop.face_analyser import get_one_face
-from roop.capturer import get_video_frame, get_video_frame_total
-from roop.predicter import predict_frame
-from roop.processors.frame.core import get_frame_processors_modules
-from roop.utilities import is_image, is_video, resolve_relative_path
-
-ROOT = None
-ROOT_HEIGHT = 700
-ROOT_WIDTH = 600
-
-PREVIEW = None
-PREVIEW_MAX_HEIGHT = 700
-PREVIEW_MAX_WIDTH = 1200
-
-RECENT_DIRECTORY_SOURCE = None
-RECENT_DIRECTORY_TARGET = None
-RECENT_DIRECTORY_OUTPUT = None
-
-preview_label = None
-preview_slider = None
-source_label = None
-target_label = None
-status_label = None
-
-
-def init(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
- global ROOT, PREVIEW
-
- ROOT = create_root(start, destroy)
- PREVIEW = create_preview(ROOT)
-
- return ROOT
-
-
-def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
- global source_label, target_label, status_label
-
- ctk.deactivate_automatic_dpi_awareness()
- ctk.set_appearance_mode('system')
- ctk.set_default_color_theme(resolve_relative_path('ui.json'))
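- # appearance follows the system theme; colours come from the bundled ui.json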
-
- root = ctk.CTk()
- root.minsize(ROOT_WIDTH, ROOT_HEIGHT)
- root.title(f'{roop.metadata.name} {roop.metadata.version}')
- root.configure()
- root.protocol('WM_DELETE_WINDOW', lambda: destroy())
-
- source_label = ctk.CTkLabel(root, text=None)
- source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25)
-
- target_label = ctk.CTkLabel(root, text=None)
- target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25)
-
- source_button = ctk.CTkButton(root, text='Select a face', cursor='hand2', command=lambda: select_source_path())
- source_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1)
-
- target_button = ctk.CTkButton(root, text='Select a target', cursor='hand2', command=lambda: select_target_path())
- target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1)
-
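- # the switches below write their state straight into roop.globals for the processing run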
- keep_fps_value = ctk.BooleanVar(value=roop.globals.keep_fps)
- keep_fps_checkbox = ctk.CTkSwitch(root, text='Keep fps', variable=keep_fps_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_fps', not roop.globals.keep_fps))
- keep_fps_checkbox.place(relx=0.1, rely=0.6)
-
- keep_frames_value = ctk.BooleanVar(value=roop.globals.keep_frames)
- keep_frames_switch = ctk.CTkSwitch(root, text='Keep frames', variable=keep_frames_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_frames', keep_frames_value.get()))
- keep_frames_switch.place(relx=0.1, rely=0.65)
-
- keep_audio_value = ctk.BooleanVar(value=roop.globals.keep_audio)
- keep_audio_switch = ctk.CTkSwitch(root, text='Keep audio', variable=keep_audio_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_audio', keep_audio_value.get()))
- keep_audio_switch.place(relx=0.6, rely=0.6)
-
- many_faces_value = ctk.BooleanVar(value=roop.globals.many_faces)
- many_faces_switch = ctk.CTkSwitch(root, text='Many faces', variable=many_faces_value, cursor='hand2', command=lambda: setattr(roop.globals, 'many_faces', many_faces_value.get()))
- many_faces_switch.place(relx=0.6, rely=0.65)
-
- start_button = ctk.CTkButton(root, text='Start', cursor='hand2', command=lambda: select_output_path(start))
- start_button.place(relx=0.15, rely=0.75, relwidth=0.2, relheight=0.05)
-
- stop_button = ctk.CTkButton(root, text='Destroy', cursor='hand2', command=lambda: destroy())
- stop_button.place(relx=0.4, rely=0.75, relwidth=0.2, relheight=0.05)
-
- preview_button = ctk.CTkButton(root, text='Preview', cursor='hand2', command=lambda: toggle_preview())
- preview_button.place(relx=0.65, rely=0.75, relwidth=0.2, relheight=0.05)
-
- status_label = ctk.CTkLabel(root, text=None, justify='center')
- status_label.place(relx=0.1, rely=0.9, relwidth=0.8)
-
- donate_label = ctk.CTkLabel(root, text='^_^ Donate to project ^_^', justify='center', cursor='hand2')
- donate_label.place(relx=0.1, rely=0.95, relwidth=0.8)
- donate_label.configure(text_color=ctk.ThemeManager.theme.get('RoopDonate').get('text_color'))
- donate_label.bind('